grsecurity-3.1-4.9.13-201703052141.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 5385cba..607c6a0 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -7,6 +7,7 @@
6  *.cis
7  *.cpio
8  *.csp
9 +*.dbg
10  *.dsp
11  *.dvi
12  *.elf
13 @@ -16,6 +17,7 @@
14  *.gcov
15  *.gen.S
16  *.gif
17 +*.gmo
18  *.grep
19  *.grp
20  *.gz
21 @@ -52,14 +54,17 @@
22  *.tab.h
23  *.tex
24  *.ver
25 +*.vim
26  *.xml
27  *.xz
28  *_MODULES
29 +*_reg_safe.h
30  *_vga16.c
31  *~
32  \#*#
33  *.9
34 -.*
35 +.[^g]*
36 +.gen*
37  .*.d
38  .mm
39  53c700_d.h
40 @@ -73,9 +78,11 @@ Image
41  Module.markers
42  Module.symvers
43  PENDING
44 +PERF*
45  SCCS
46  System.map*
47  TAGS
48 +TRACEEVENT-CFLAGS
49  aconf
50  af_names.h
51  aic7*reg.h*
52 @@ -84,6 +91,7 @@ aic7*seq.h*
53  aicasm
54  aicdb.h*
55  altivec*.c
56 +ashldi3.S
57  asm-offsets.h
58  asm_offsets.h
59  autoconf.h*
60 @@ -96,11 +104,14 @@ bounds.h
61  bsetup
62  btfixupprep
63  build
64 +builtin-policy.h
65  bvmlinux
66  bzImage*
67  capability_names.h
68  capflags.c
69  classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72  comp*.log
73  compile.h*
74  conf
75 @@ -109,19 +120,23 @@ config-*
76  config_data.h*
77  config.mak
78  config.mak.autogen
79 +config.tmp
80  conmakehash
81  consolemap_deftbl.c*
82  cpustr.h
83  crc32table.h*
84  cscope.*
85  defkeymap.c
86 +devicetable-offsets.h
87  devlist.h*
88  dnotify_test
89  docproc
90  dslm
91 +dtc-lexer.lex.c
92  elf2ecoff
93  elfconfig.h*
94  evergreen_reg_safe.h
95 +exception_policy.conf
96  fixdep
97  flask.h
98  fore200e_mkfirm
99 @@ -129,12 +144,15 @@ fore200e_pca_fw.c*
100  gconf
101  gconf.glade.h
102  gen-devlist
103 +gen-kdb_cmds.c
104  gen_crc32table
105  gen_init_cpio
106  generated
107  genheaders
108  genksyms
109  *_gray256.c
110 +hash
111 +hid-example
112  hpet_example
113  hugepage-mmap
114  hugepage-shm
115 @@ -149,14 +167,14 @@ int32.c
116  int4.c
117  int8.c
118  kallsyms
119 -kconfig
120 +kern_constants.h
121  keywords.c
122  ksym.c*
123  ksym.h*
124  kxgettext
125  lex.c
126  lex.*.c
127 -linux
128 +lib1funcs.S
129  logo_*.c
130  logo_*_clut224.c
131  logo_*_mono.c
132 @@ -167,12 +185,14 @@ machtypes.h
133  map
134  map_hugetlb
135  mconf
136 +mdp
137  miboot*
138  mk_elfconfig
139  mkboot
140  mkbugboot
141  mkcpustr
142  mkdep
143 +mkpiggy
144  mkprep
145  mkregtable
146  mktables
147 @@ -188,6 +208,8 @@ oui.c*
148  page-types
149  parse.c
150  parse.h
151 +parse-events*
152 +pasyms.h
153  patches*
154  pca200e.bin
155  pca200e_ecd.bin2
156 @@ -197,6 +219,7 @@ perf-archive
157  piggyback
158  piggy.gzip
159  piggy.S
160 +pmu-*
161  pnmtologo
162  ppc_defs.h*
163  pss_boot.h
164 @@ -206,7 +229,12 @@ r200_reg_safe.h
165  r300_reg_safe.h
166  r420_reg_safe.h
167  r600_reg_safe.h
168 +randomize_layout_hash.h
169 +randomize_layout_seed.h
170 +realmode.lds
171 +realmode.relocs
172  recordmcount
173 +regdb.c
174  relocs
175  rlim_names.h
176  rn50_reg_safe.h
177 @@ -216,8 +244,17 @@ series
178  setup
179  setup.bin
180  setup.elf
181 +signing_key*
182 +aux.h
183 +disable.h
184 +e_fields.h
185 +e_fns.h
186 +e_fptrs.h
187 +e_vars.h
188  sImage
189 +slabinfo
190  sm_tbl*
191 +sortextable
192  split-include
193  syscalltab.h
194  tables.c
195 @@ -227,6 +264,7 @@ tftpboot.img
196  timeconst.h
197  times.h*
198  trix_boot.h
199 +user_constants.h
200  utsrelease.h*
201  vdso-syms.lds
202  vdso.lds
203 @@ -238,13 +276,17 @@ vdso32.lds
204  vdso32.so.dbg
205  vdso64.lds
206  vdso64.so.dbg
207 +vdsox32.lds
208 +vdsox32-syms.lds
209  version.h*
210  vmImage
211  vmlinux
212  vmlinux-*
213  vmlinux.aout
214  vmlinux.bin.all
215 +vmlinux.bin.bz2
216  vmlinux.lds
217 +vmlinux.relocs
218  vmlinuz
219  voffset.h
220  vsyscall.lds
221 @@ -252,9 +294,12 @@ vsyscall_32.lds
222  wanxlfw.inc
223  uImage
224  unifdef
225 +utsrelease.h
226  wakeup.bin
227  wakeup.elf
228  wakeup.lds
229 +x509*
230  zImage*
231  zconf.hash.c
232 +zconf.lex.c
233  zoffset.h
234 diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
235 index 9b9c479..5a635ff 100644
236 --- a/Documentation/kbuild/makefiles.txt
237 +++ b/Documentation/kbuild/makefiles.txt
238 @@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
239         === 4 Host Program support
240            --- 4.1 Simple Host Program
241            --- 4.2 Composite Host Programs
242 -          --- 4.3 Using C++ for host programs
243 -          --- 4.4 Controlling compiler options for host programs
244 -          --- 4.5 When host programs are actually built
245 -          --- 4.6 Using hostprogs-$(CONFIG_FOO)
246 +          --- 4.3 Defining shared libraries
247 +          --- 4.4 Using C++ for host programs
248 +          --- 4.5 Controlling compiler options for host programs
249 +          --- 4.6 When host programs are actually built
250 +          --- 4.7 Using hostprogs-$(CONFIG_FOO)
251  
252         === 5 Kbuild clean infrastructure
253  
254 @@ -645,7 +646,29 @@ Both possibilities are described in the following.
255         Finally, the two .o files are linked to the executable, lxdialog.
256         Note: The syntax <executable>-y is not permitted for host-programs.
257  
258 ---- 4.3 Using C++ for host programs
259 +--- 4.3 Defining shared libraries
260 +
261 +       Objects with extension .so are considered shared libraries, and
262 +       will be compiled as position independent objects.
263 +       Kbuild provides support for shared libraries, but the usage
264 +       shall be restricted.
265 +       In the following example the libkconfig.so shared library is used
266 +       to link the executable conf.
267 +
268 +       Example:
269 +               #scripts/kconfig/Makefile
270 +               hostprogs-y     := conf
271 +               conf-objs       := conf.o libkconfig.so
272 +               libkconfig-objs := expr.o type.o
273 +
274 +       Shared libraries always require a corresponding -objs line, and
275 +       in the example above the shared library libkconfig is composed by
276 +       the two objects expr.o and type.o.
277 +       expr.o and type.o will be built as position independent code and
278 +       linked as a shared library libkconfig.so. C++ is not supported for
279 +       shared libraries.
280 +
281 +--- 4.4 Using C++ for host programs
282  
283         kbuild offers support for host programs written in C++. This was
284         introduced solely to support kconfig, and is not recommended
285 @@ -668,7 +691,7 @@ Both possibilities are described in the following.
286                 qconf-cxxobjs := qconf.o
287                 qconf-objs    := check.o
288  
289 ---- 4.4 Controlling compiler options for host programs
290 +--- 4.5 Controlling compiler options for host programs
291  
292         When compiling host programs, it is possible to set specific flags.
293         The programs will always be compiled utilising $(HOSTCC) passed
294 @@ -696,7 +719,7 @@ Both possibilities are described in the following.
295         When linking qconf, it will be passed the extra option
296         "-L$(QTDIR)/lib".
297  
298 ---- 4.5 When host programs are actually built
299 +--- 4.6 When host programs are actually built
300  
301         Kbuild will only build host-programs when they are referenced
302         as a prerequisite.
303 @@ -727,7 +750,7 @@ Both possibilities are described in the following.
304         This will tell kbuild to build lxdialog even if not referenced in
305         any rule.
306  
307 ---- 4.6 Using hostprogs-$(CONFIG_FOO)
308 +--- 4.7 Using hostprogs-$(CONFIG_FOO)
309  
310         A typical pattern in a Kbuild file looks like this:
311  
312 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
313 index 65b05ba..725a42a 100644
314 --- a/Documentation/kernel-parameters.txt
315 +++ b/Documentation/kernel-parameters.txt
316 @@ -1426,6 +1426,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
317                         [KNL] Should the hard-lockup detector generate
318                         backtraces on all cpus.
319                         Format: <integer>
320 +       grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
321 +                       ignore grsecurity's /proc restrictions
322 +
323 +       grsec_sysfs_restrict= Format: 0 | 1
324 +                       Default: 1
325 +                       Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
326  
327         hashdist=       [KNL,NUMA] Large hashes allocated during boot
328                         are distributed across NUMA nodes.  Defaults on
329 @@ -2655,6 +2661,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
330                         noexec=on: enable non-executable mappings (default)
331                         noexec=off: disable non-executable mappings
332  
333 +       nopcid          [X86-64]
334 +                       Disable PCID (Process-Context IDentifier) even if it
335 +                       is supported by the processor.
336 +
337         nosmap          [X86]
338                         Disable SMAP (Supervisor Mode Access Prevention)
339                         even if it is supported by processor.
340 @@ -2963,6 +2973,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
341                         the specified number of seconds.  This is to be used if
342                         your oopses keep scrolling off the screen.
343  
344 +       pax_nouderef    [X86] disables UDEREF.  Most likely needed under certain
345 +                       virtualization environments that don't cope well with the
346 +                       expand down segment used by UDEREF on X86-32 or the frequent
347 +                       page table updates on X86-64.
348 +
349 +       pax_sanitize_slab=
350 +                       Format: { 0 | 1 | off | fast | full }
351 +                       Options '0' and '1' are only provided for backward
352 +                       compatibility, 'off' or 'fast' should be used instead.
353 +                       0|off : disable slab object sanitization
354 +                       1|fast: enable slab object sanitization excluding
355 +                               whitelisted slabs (default)
356 +                       full  : sanitize all slabs, even the whitelisted ones
357 +
358 +       pax_softmode=   0/1 to disable/enable PaX softmode on boot already.
359 +
360 +       pax_extra_latent_entropy
361 +                       Enable a very simple form of latent entropy extraction
362 +                       from the first 4GB of memory as the bootmem allocator
363 +                       passes the memory pages to the buddy allocator.
364 +
365 +       pax_size_overflow_report_only
366 +                       Enables rate-limited logging of size_overflow plugin
367 +                       violations while disabling killing of the violating
368 +                       task.
369 +
370 +       pax_weakuderef  [X86-64] enables the weaker but faster form of UDEREF
371 +                       when the processor supports PCID.
372 +
373         pcbit=          [HW,ISDN]
374  
375         pcd.            [PARIDE]
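The grsec_* and pax_* options documented above are ordinary kernel command line parameters. A purely hypothetical bootloader entry using a few of them (the GID and option values are illustrative only, assuming a GRUB-style /etc/default/grub):

	# example only -- values are illustrative, not recommendations
	GRUB_CMDLINE_LINUX="grsec_proc_gid=1001 grsec_sysfs_restrict=0 pax_sanitize_slab=fast pax_softmode=0"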
376 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
377 index ffab8b5..b8fcd61 100644
378 --- a/Documentation/sysctl/kernel.txt
379 +++ b/Documentation/sysctl/kernel.txt
380 @@ -42,6 +42,7 @@ show up in /proc/sys/kernel:
381  - kptr_restrict
382  - kstack_depth_to_print       [ X86 only ]
383  - l2cr                        [ PPC only ]
384 +- modify_ldt                  [ X86 only ]
385  - modprobe                    ==> Documentation/debugging-modules.txt
386  - modules_disabled
387  - msg_next_id                [ sysv ipc ]
388 @@ -409,6 +410,20 @@ This flag controls the L2 cache of G3 processor boards. If
389  
390  ==============================================================
391  
392 +modify_ldt: (X86 only)
393 +
394 +Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
395 +(Local Descriptor Table) may be needed to run a 16-bit or segmented code
396 +such as Dosemu or Wine. This is done via a system call which is not needed
397 +to run portable applications, and which can sometimes be abused to exploit
398 +some weaknesses of the architecture, opening new vulnerabilities.
399 +
400 +This sysctl allows one to increase the system's security by disabling the
401 +system call, or to restore compatibility with specific applications when it
402 +was already disabled.
403 +
404 +==============================================================
405 +
406  modules_disabled:
407  
408  A toggle value indicating if modules are allowed to be loaded
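Like the other entries described in this file, the new modify_ldt knob documented above shows up under /proc/sys/kernel and can be toggled at runtime; a minimal sketch (root required; 0 disables the system call, 1 re-enables it):

	# disable the modify_ldt() system call
	echo 0 > /proc/sys/kernel/modify_ldt
	# equivalent, via sysctl(8)
	sysctl -w kernel.modify_ldt=0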
409 diff --git a/Kbuild b/Kbuild
410 index 3d0ae15..84e5412 100644
411 --- a/Kbuild
412 +++ b/Kbuild
413 @@ -91,6 +91,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
414  always += missing-syscalls
415  targets += missing-syscalls
416  
417 +GCC_PLUGINS_missing-syscalls := n
418  quiet_cmd_syscalls = CALL    $<
419        cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
420  
421 diff --git a/Makefile b/Makefile
422 index 14dc275..3ff2e6b 100644
423 --- a/Makefile
424 +++ b/Makefile
425 @@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
426  HOSTCC       = gcc
427  HOSTCXX      = g++
428  HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
429 -HOSTCXXFLAGS = -O2
430 +HOSTCFLAGS  += -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
431 +HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
432 +HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
433  
434  ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
435  HOSTCFLAGS  += -Wno-unused-value -Wno-unused-parameter \
436 @@ -731,7 +733,7 @@ KBUILD_CFLAGS   += $(call cc-option, -gsplit-dwarf, -g)
437  else
438  KBUILD_CFLAGS  += -g
439  endif
440 -KBUILD_AFLAGS  += -Wa,-gdwarf-2
441 +KBUILD_AFLAGS  += -Wa,--gdwarf-2
442  endif
443  ifdef CONFIG_DEBUG_INFO_DWARF4
444  KBUILD_CFLAGS  += $(call cc-option, -gdwarf-4,)
445 @@ -910,7 +912,7 @@ export mod_sign_cmd
446  
447  
448  ifeq ($(KBUILD_EXTMOD),)
449 -core-y         += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
450 +core-y         += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
451  
452  vmlinux-dirs   := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
453                      $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
454 @@ -1274,7 +1276,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
455                   Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
456                   signing_key.pem signing_key.priv signing_key.x509     \
457                   x509.genkey extra_certificates signing_key.x509.keyid \
458 -                 signing_key.x509.signer vmlinux-gdb.py
459 +                 signing_key.x509.signer vmlinux-gdb.py \
460 +                 scripts/gcc-plugins/size_overflow_plugin/e_*.h \
461 +                 scripts/gcc-plugins/size_overflow_plugin/disable.h \
462 +                 scripts/gcc-plugins/randomize_layout_seed.h
463  
464  # clean - Delete most, but leave enough to build external modules
465  #
466 @@ -1314,7 +1319,7 @@ distclean: mrproper
467         @find $(srctree) $(RCS_FIND_IGNORE) \
468                 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
469                 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
470 -               -o -name '.*.rej' -o -name '*%'  -o -name 'core' \) \
471 +               -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
472                 -type f -print | xargs rm -f
473  
474  
475 diff --git a/arch/Kconfig b/arch/Kconfig
476 index 659bdd0..4179181 100644
477 --- a/arch/Kconfig
478 +++ b/arch/Kconfig
479 @@ -164,6 +164,7 @@ config ARCH_USE_BUILTIN_BSWAP
480  config KRETPROBES
481         def_bool y
482         depends on KPROBES && HAVE_KRETPROBES
483 +       depends on !PAX_RAP
484  
485  config USER_RETURN_NOTIFIER
486         bool
487 @@ -355,7 +356,7 @@ config HAVE_GCC_PLUGINS
488  menuconfig GCC_PLUGINS
489         bool "GCC plugins"
490         depends on HAVE_GCC_PLUGINS
491 -       depends on !COMPILE_TEST
492 +       default y
493         help
494           GCC plugins are loadable modules that provide extra features to the
495           compiler. They are useful for runtime instrumentation and static analysis.
496 @@ -759,6 +760,7 @@ config VMAP_STACK
497         default y
498         bool "Use a virtually-mapped stack"
499         depends on HAVE_ARCH_VMAP_STACK && !KASAN
500 +       depends on !GRKERNSEC_KSTACKOVERFLOW
501         ---help---
502           Enable this if you want the use virtually-mapped kernel stacks
503           with guard pages.  This causes kernel stack overflows to be
504 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
505 index 498933a..78d2b22 100644
506 --- a/arch/alpha/include/asm/atomic.h
507 +++ b/arch/alpha/include/asm/atomic.h
508 @@ -308,4 +308,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
509  #define atomic_dec(v) atomic_sub(1,(v))
510  #define atomic64_dec(v) atomic64_sub(1,(v))
511  
512 +#define atomic64_read_unchecked(v)             atomic64_read(v)
513 +#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
514 +#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
515 +#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
516 +#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
517 +#define atomic64_inc_unchecked(v)              atomic64_inc(v)
518 +#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
519 +#define atomic64_dec_unchecked(v)              atomic64_dec(v)
520 +#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
521 +
522  #endif /* _ALPHA_ATOMIC_H */
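The *_unchecked aliases added above let code that distinguishes checked and unchecked counters under PAX_REFCOUNT still build on alpha, where they simply fall through to the plain operations. A short, purely hypothetical usage sketch (counter names invented; on architectures that implement PAX_REFCOUNT the checked variants trap on overflow, while the unchecked ones are meant for counters that may wrap):

	/* hypothetical example, not part of the patch */
	static atomic64_t obj_refs = ATOMIC64_INIT(1);             /* overflow is trapped under PAX_REFCOUNT */
	static atomic64_unchecked_t stat_bytes = ATOMIC64_INIT(0); /* allowed to wrap silently */

	static void account(long long n)
	{
		atomic64_inc(&obj_refs);                /* checked increment  */
		atomic64_add_unchecked(n, &stat_bytes); /* unchecked addition */
	}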
523 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
524 index ad368a9..fbe0f25 100644
525 --- a/arch/alpha/include/asm/cache.h
526 +++ b/arch/alpha/include/asm/cache.h
527 @@ -4,19 +4,19 @@
528  #ifndef __ARCH_ALPHA_CACHE_H
529  #define __ARCH_ALPHA_CACHE_H
530  
531 +#include <linux/const.h>
532  
533  /* Bytes per L1 (data) cache line. */
534  #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
535 -# define L1_CACHE_BYTES     64
536  # define L1_CACHE_SHIFT     6
537  #else
538  /* Both EV4 and EV5 are write-through, read-allocate,
539     direct-mapped, physical.
540  */
541 -# define L1_CACHE_BYTES     32
542  # define L1_CACHE_SHIFT     5
543  #endif
544  
545 +#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
546  #define SMP_CACHE_BYTES    L1_CACHE_BYTES
547  
548  #endif
549 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
550 index 968d999..d36b2df 100644
551 --- a/arch/alpha/include/asm/elf.h
552 +++ b/arch/alpha/include/asm/elf.h
553 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
554  
555  #define ELF_ET_DYN_BASE                (TASK_UNMAPPED_BASE + 0x1000000)
556  
557 +#ifdef CONFIG_PAX_ASLR
558 +#define PAX_ELF_ET_DYN_BASE    (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
559 +
560 +#define PAX_DELTA_MMAP_LEN     (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
561 +#define PAX_DELTA_STACK_LEN    (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
562 +#endif
563 +
564  /* $0 is set by ld.so to a pointer to a function which might be 
565     registered using atexit.  This provides a mean for the dynamic
566     linker to call DT_FINI functions for shared libraries that have
567 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
568 index c2ebb6f..93a0613 100644
569 --- a/arch/alpha/include/asm/pgalloc.h
570 +++ b/arch/alpha/include/asm/pgalloc.h
571 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
572         pgd_set(pgd, pmd);
573  }
574  
575 +static inline void
576 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
577 +{
578 +       pgd_populate(mm, pgd, pmd);
579 +}
580 +
581  extern pgd_t *pgd_alloc(struct mm_struct *mm);
582  
583  static inline void
584 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
585 index a9a1195..e9b8417 100644
586 --- a/arch/alpha/include/asm/pgtable.h
587 +++ b/arch/alpha/include/asm/pgtable.h
588 @@ -101,6 +101,17 @@ struct vm_area_struct;
589  #define PAGE_SHARED    __pgprot(_PAGE_VALID | __ACCESS_BITS)
590  #define PAGE_COPY      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
591  #define PAGE_READONLY  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
592 +
593 +#ifdef CONFIG_PAX_PAGEEXEC
594 +# define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
595 +# define PAGE_COPY_NOEXEC      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
596 +# define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
597 +#else
598 +# define PAGE_SHARED_NOEXEC    PAGE_SHARED
599 +# define PAGE_COPY_NOEXEC      PAGE_COPY
600 +# define PAGE_READONLY_NOEXEC  PAGE_READONLY
601 +#endif
602 +
603  #define PAGE_KERNEL    __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
604  
605  #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
606 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
607 index 936bc8f..bb1859f 100644
608 --- a/arch/alpha/kernel/module.c
609 +++ b/arch/alpha/kernel/module.c
610 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
611  
612         /* The small sections were sorted to the end of the segment.
613            The following should definitely cover them.  */
614 -       gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
615 +       gp = (u64)me->core_layout.base_rw + me->core_layout.size_rw - 0x8000;
616         got = sechdrs[me->arch.gotsecindex].sh_addr;
617  
618         for (i = 0; i < n; i++) {
619 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
620 index ffb93f49..ced8233 100644
621 --- a/arch/alpha/kernel/osf_sys.c
622 +++ b/arch/alpha/kernel/osf_sys.c
623 @@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
624     generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
625  
626  static unsigned long
627 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
628 -                        unsigned long limit)
629 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
630 +                        unsigned long limit, unsigned long flags)
631  {
632         struct vm_unmapped_area_info info;
633 +       unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
634  
635         info.flags = 0;
636         info.length = len;
637 @@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
638         info.high_limit = limit;
639         info.align_mask = 0;
640         info.align_offset = 0;
641 +       info.threadstack_offset = offset;
642         return vm_unmapped_area(&info);
643  }
644  
645 @@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
646            merely specific addresses, but regions of memory -- perhaps
647            this feature should be incorporated into all ports?  */
648  
649 +#ifdef CONFIG_PAX_RANDMMAP
650 +       if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
651 +#endif
652 +
653         if (addr) {
654 -               addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
655 +               addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
656                 if (addr != (unsigned long) -ENOMEM)
657                         return addr;
658         }
659  
660         /* Next, try allocating at TASK_UNMAPPED_BASE.  */
661 -       addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
662 -                                        len, limit);
663 +       addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
664 +
665         if (addr != (unsigned long) -ENOMEM)
666                 return addr;
667  
668         /* Finally, try allocating in low memory.  */
669 -       addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
670 +       addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
671  
672         return addr;
673  }
674 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
675 index 83e9eee..db02682 100644
676 --- a/arch/alpha/mm/fault.c
677 +++ b/arch/alpha/mm/fault.c
678 @@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
679         __reload_thread(pcb);
680  }
681  
682 +#ifdef CONFIG_PAX_PAGEEXEC
683 +/*
684 + * PaX: decide what to do with offenders (regs->pc = fault address)
685 + *
686 + * returns 1 when task should be killed
687 + *         2 when patched PLT trampoline was detected
688 + *         3 when unpatched PLT trampoline was detected
689 + */
690 +static int pax_handle_fetch_fault(struct pt_regs *regs)
691 +{
692 +
693 +#ifdef CONFIG_PAX_EMUPLT
694 +       int err;
695 +
696 +       do { /* PaX: patched PLT emulation #1 */
697 +               unsigned int ldah, ldq, jmp;
698 +
699 +               err = get_user(ldah, (unsigned int *)regs->pc);
700 +               err |= get_user(ldq, (unsigned int *)(regs->pc+4));
701 +               err |= get_user(jmp, (unsigned int *)(regs->pc+8));
702 +
703 +               if (err)
704 +                       break;
705 +
706 +               if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
707 +                   (ldq & 0xFFFF0000U) == 0xA77B0000U &&
708 +                   jmp == 0x6BFB0000U)
709 +               {
710 +                       unsigned long r27, addr;
711 +                       unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
712 +                       unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
713 +
714 +                       addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
715 +                       err = get_user(r27, (unsigned long *)addr);
716 +                       if (err)
717 +                               break;
718 +
719 +                       regs->r27 = r27;
720 +                       regs->pc = r27;
721 +                       return 2;
722 +               }
723 +       } while (0);
724 +
725 +       do { /* PaX: patched PLT emulation #2 */
726 +               unsigned int ldah, lda, br;
727 +
728 +               err = get_user(ldah, (unsigned int *)regs->pc);
729 +               err |= get_user(lda, (unsigned int *)(regs->pc+4));
730 +               err |= get_user(br, (unsigned int *)(regs->pc+8));
731 +
732 +               if (err)
733 +                       break;
734 +
735 +               if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
736 +                   (lda & 0xFFFF0000U) == 0xA77B0000U &&
737 +                   (br & 0xFFE00000U) == 0xC3E00000U)
738 +               {
739 +                       unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
740 +                       unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
741 +                       unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
742 +
743 +                       regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
744 +                       regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
745 +                       return 2;
746 +               }
747 +       } while (0);
748 +
749 +       do { /* PaX: unpatched PLT emulation */
750 +               unsigned int br;
751 +
752 +               err = get_user(br, (unsigned int *)regs->pc);
753 +
754 +               if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
755 +                       unsigned int br2, ldq, nop, jmp;
756 +                       unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
757 +
758 +                       addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
759 +                       err = get_user(br2, (unsigned int *)addr);
760 +                       err |= get_user(ldq, (unsigned int *)(addr+4));
761 +                       err |= get_user(nop, (unsigned int *)(addr+8));
762 +                       err |= get_user(jmp, (unsigned int *)(addr+12));
763 +                       err |= get_user(resolver, (unsigned long *)(addr+16));
764 +
765 +                       if (err)
766 +                               break;
767 +
768 +                       if (br2 == 0xC3600000U &&
769 +                           ldq == 0xA77B000CU &&
770 +                           nop == 0x47FF041FU &&
771 +                           jmp == 0x6B7B0000U)
772 +                       {
773 +                               regs->r28 = regs->pc+4;
774 +                               regs->r27 = addr+16;
775 +                               regs->pc = resolver;
776 +                               return 3;
777 +                       }
778 +               }
779 +       } while (0);
780 +#endif
781 +
782 +       return 1;
783 +}
784 +
785 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
786 +{
787 +       unsigned long i;
788 +
789 +       printk(KERN_ERR "PAX: bytes at PC: ");
790 +       for (i = 0; i < 5; i++) {
791 +               unsigned int c;
792 +               if (get_user(c, (unsigned int *)pc+i))
793 +                       printk(KERN_CONT "???????? ");
794 +               else
795 +                       printk(KERN_CONT "%08x ", c);
796 +       }
797 +       printk("\n");
798 +}
799 +#endif
800  
801  /*
802   * This routine handles page faults.  It determines the address,
803 @@ -132,8 +250,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
804   good_area:
805         si_code = SEGV_ACCERR;
806         if (cause < 0) {
807 -               if (!(vma->vm_flags & VM_EXEC))
808 +               if (!(vma->vm_flags & VM_EXEC)) {
809 +
810 +#ifdef CONFIG_PAX_PAGEEXEC
811 +                       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
812 +                               goto bad_area;
813 +
814 +                       up_read(&mm->mmap_sem);
815 +                       switch (pax_handle_fetch_fault(regs)) {
816 +
817 +#ifdef CONFIG_PAX_EMUPLT
818 +                       case 2:
819 +                       case 3:
820 +                               return;
821 +#endif
822 +
823 +                       }
824 +                       pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
825 +                       do_group_exit(SIGKILL);
826 +#else
827                         goto bad_area;
828 +#endif
829 +
830 +               }
831         } else if (!cause) {
832                 /* Allow reads even for write-only mappings */
833                 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
834 diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
835 index 42b0504..6013221 100644
836 --- a/arch/arc/kernel/kprobes.c
837 +++ b/arch/arc/kernel/kprobes.c
838 @@ -424,6 +424,7 @@ static void __used kretprobe_trampoline_holder(void)
839                              "kretprobe_trampoline:\n" "nop\n");
840  }
841  
842 +#ifdef CONFIG_KRETPROBES
843  void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
844                                       struct pt_regs *regs)
845  {
846 @@ -433,6 +434,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
847         /* Replace the return addr with trampoline addr */
848         regs->blink = (unsigned long)&kretprobe_trampoline;
849  }
850 +#endif
851  
852  static int __kprobes trampoline_probe_handler(struct kprobe *p,
853                                               struct pt_regs *regs)
854 @@ -509,6 +511,7 @@ int __init arch_init_kprobes(void)
855         return register_kprobe(&trampoline_p);
856  }
857  
858 +#ifdef CONFIG_KRETPROBES
859  int __kprobes arch_trampoline_kprobe(struct kprobe *p)
860  {
861         if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
862 @@ -516,6 +519,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
863  
864         return 0;
865  }
866 +#endif
867  
868  void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
869  {
870 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
871 index b5d529f..0bb4d4f 100644
872 --- a/arch/arm/Kconfig
873 +++ b/arch/arm/Kconfig
874 @@ -1622,6 +1622,7 @@ config AEABI
875  config OABI_COMPAT
876         bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
877         depends on AEABI && !THUMB2_KERNEL
878 +       depends on !GRKERNSEC
879         help
880           This option preserves the old syscall interface along with the
881           new (ARM EABI) one. It also provides a compatibility layer to
882 @@ -1690,6 +1691,7 @@ config HIGHPTE
883  config CPU_SW_DOMAIN_PAN
884         bool "Enable use of CPU domains to implement privileged no-access"
885         depends on MMU && !ARM_LPAE
886 +       depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
887         default y
888         help
889           Increase kernel security by ensuring that normal kernel accesses
890 @@ -1766,7 +1768,7 @@ config ALIGNMENT_TRAP
891  
892  config UACCESS_WITH_MEMCPY
893         bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
894 -       depends on MMU
895 +       depends on MMU && !PAX_MEMORY_UDEREF
896         default y if CPU_FEROCEON
897         help
898           Implement faster copy_to_user and clear_user methods for CPU
899 @@ -2021,6 +2023,7 @@ config KEXEC
900         depends on (!SMP || PM_SLEEP_SMP)
901         depends on !CPU_V7M
902         select KEXEC_CORE
903 +       depends on !GRKERNSEC_KMEM
904         help
905           kexec is a system call that implements the ability to shutdown your
906           current kernel, and to start another kernel.  It is like a reboot
907 @@ -2065,7 +2068,7 @@ config EFI_STUB
908  
909  config EFI
910         bool "UEFI runtime support"
911 -       depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
912 +       depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL && !PAX_KERNEXEC
913         select UCS2_STRING
914         select EFI_PARAMS_FROM_FDT
915         select EFI_STUB
916 diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
917 index d83f7c3..a6aba4c 100644
918 --- a/arch/arm/Kconfig.debug
919 +++ b/arch/arm/Kconfig.debug
920 @@ -7,6 +7,7 @@ config ARM_PTDUMP
921         depends on DEBUG_KERNEL
922         depends on MMU
923         select DEBUG_FS
924 +       depends on !GRKERNSEC_KMEM
925         ---help---
926           Say Y here if you want to show the kernel pagetable layout in a
927           debugfs file. This information is only useful for kernel developers
928 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
929 index d50430c..39509a6 100644
930 --- a/arch/arm/boot/compressed/Makefile
931 +++ b/arch/arm/boot/compressed/Makefile
932 @@ -24,6 +24,8 @@ endif
933  
934  GCOV_PROFILE           := n
935  
936 +GCC_PLUGINS            := n
937 +
938  #
939  # Architecture dependencies
940  #
941 diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
942 index 6fc73bf..d0af3c7b 100644
943 --- a/arch/arm/crypto/sha1_glue.c
944 +++ b/arch/arm/crypto/sha1_glue.c
945 @@ -27,8 +27,8 @@
946  
947  #include "sha1.h"
948  
949 -asmlinkage void sha1_block_data_order(u32 *digest,
950 -               const unsigned char *data, unsigned int rounds);
951 +asmlinkage void sha1_block_data_order(struct sha1_state *digest,
952 +               const u8 *data, int rounds);
953  
954  int sha1_update_arm(struct shash_desc *desc, const u8 *data,
955                     unsigned int len)
956 @@ -36,22 +36,20 @@ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
957         /* make sure casting to sha1_block_fn() is safe */
958         BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
959  
960 -       return sha1_base_do_update(desc, data, len,
961 -                                  (sha1_block_fn *)sha1_block_data_order);
962 +       return sha1_base_do_update(desc, data, len, sha1_block_data_order);
963  }
964  EXPORT_SYMBOL_GPL(sha1_update_arm);
965  
966  static int sha1_final(struct shash_desc *desc, u8 *out)
967  {
968 -       sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order);
969 +       sha1_base_do_finalize(desc, sha1_block_data_order);
970         return sha1_base_finish(desc, out);
971  }
972  
973  int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
974                    unsigned int len, u8 *out)
975  {
976 -       sha1_base_do_update(desc, data, len,
977 -                           (sha1_block_fn *)sha1_block_data_order);
978 +       sha1_base_do_update(desc, data, len, sha1_block_data_order);
979         return sha1_final(desc, out);
980  }
981  EXPORT_SYMBOL_GPL(sha1_finup_arm);
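The remaining ARM crypto glue changes below (sha1-neon, sha256, sha512 and their NEON variants) repeat the pattern of this hunk: the assembly routine is declared with the exact signature of the block-function type expected by the sha*_base helpers, so it can be passed directly instead of through a function-pointer cast, presumably to keep indirect calls type-consistent for RAP. A condensed sketch of the idea, using the sha1_block_fn typedef from include/crypto/sha1_base.h:

	/* sha1_base.h: typedef void (sha1_block_fn)(struct sha1_state *sst,
	 *                                           u8 const *src, int blocks);
	 *
	 * before (needs a cast at every call site):
	 *   asmlinkage void sha1_block_data_order(u32 *digest,
	 *                  const unsigned char *data, unsigned int rounds);
	 *   sha1_base_do_update(desc, data, len,
	 *                  (sha1_block_fn *)sha1_block_data_order);
	 */

	/* after: the prototype matches sha1_block_fn, so no cast is needed */
	asmlinkage void sha1_block_data_order(struct sha1_state *digest,
					      const u8 *data, int rounds);

	/* sha1_base_do_update(desc, data, len, sha1_block_data_order); */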
982 diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
983 index 4e22f12..49902aa 100644
984 --- a/arch/arm/crypto/sha1_neon_glue.c
985 +++ b/arch/arm/crypto/sha1_neon_glue.c
986 @@ -31,8 +31,8 @@
987  
988  #include "sha1.h"
989  
990 -asmlinkage void sha1_transform_neon(void *state_h, const char *data,
991 -                                   unsigned int rounds);
992 +asmlinkage void sha1_transform_neon(struct sha1_state *state_h, const u8 *data,
993 +                                   int rounds);
994  
995  static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
996                           unsigned int len)
997 @@ -45,7 +45,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
998  
999         kernel_neon_begin();
1000         sha1_base_do_update(desc, data, len,
1001 -                           (sha1_block_fn *)sha1_transform_neon);
1002 +                           sha1_transform_neon);
1003         kernel_neon_end();
1004  
1005         return 0;
1006 @@ -60,8 +60,8 @@ static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
1007         kernel_neon_begin();
1008         if (len)
1009                 sha1_base_do_update(desc, data, len,
1010 -                                   (sha1_block_fn *)sha1_transform_neon);
1011 -       sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon);
1012 +                                   sha1_transform_neon);
1013 +       sha1_base_do_finalize(desc, sha1_transform_neon);
1014         kernel_neon_end();
1015  
1016         return sha1_base_finish(desc, out);
1017 diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
1018 index a84e869..53a0c61 100644
1019 --- a/arch/arm/crypto/sha256_glue.c
1020 +++ b/arch/arm/crypto/sha256_glue.c
1021 @@ -30,8 +30,8 @@
1022  
1023  #include "sha256_glue.h"
1024  
1025 -asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
1026 -                                       unsigned int num_blks);
1027 +asmlinkage void sha256_block_data_order(struct sha256_state *digest, const u8 *data,
1028 +                                       int num_blks);
1029  
1030  int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
1031                              unsigned int len)
1032 @@ -39,23 +39,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
1033         /* make sure casting to sha256_block_fn() is safe */
1034         BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
1035  
1036 -       return sha256_base_do_update(desc, data, len,
1037 -                               (sha256_block_fn *)sha256_block_data_order);
1038 +       return sha256_base_do_update(desc, data, len, sha256_block_data_order);
1039  }
1040  EXPORT_SYMBOL(crypto_sha256_arm_update);
1041  
1042  static int sha256_final(struct shash_desc *desc, u8 *out)
1043  {
1044 -       sha256_base_do_finalize(desc,
1045 -                               (sha256_block_fn *)sha256_block_data_order);
1046 +       sha256_base_do_finalize(desc, sha256_block_data_order);
1047         return sha256_base_finish(desc, out);
1048  }
1049  
1050  int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
1051                             unsigned int len, u8 *out)
1052  {
1053 -       sha256_base_do_update(desc, data, len,
1054 -                             (sha256_block_fn *)sha256_block_data_order);
1055 +       sha256_base_do_update(desc, data, len, sha256_block_data_order);
1056         return sha256_final(desc, out);
1057  }
1058  EXPORT_SYMBOL(crypto_sha256_arm_finup);
1059 diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
1060 index 39ccd65..f9511cb 100644
1061 --- a/arch/arm/crypto/sha256_neon_glue.c
1062 +++ b/arch/arm/crypto/sha256_neon_glue.c
1063 @@ -26,8 +26,8 @@
1064  
1065  #include "sha256_glue.h"
1066  
1067 -asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
1068 -                                            unsigned int num_blks);
1069 +asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, const u8 *data,
1070 +                                            int num_blks);
1071  
1072  static int sha256_update(struct shash_desc *desc, const u8 *data,
1073                          unsigned int len)
1074 @@ -39,8 +39,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
1075                 return crypto_sha256_arm_update(desc, data, len);
1076  
1077         kernel_neon_begin();
1078 -       sha256_base_do_update(desc, data, len,
1079 -                       (sha256_block_fn *)sha256_block_data_order_neon);
1080 +       sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
1081         kernel_neon_end();
1082  
1083         return 0;
1084 @@ -54,10 +53,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
1085  
1086         kernel_neon_begin();
1087         if (len)
1088 -               sha256_base_do_update(desc, data, len,
1089 -                       (sha256_block_fn *)sha256_block_data_order_neon);
1090 -       sha256_base_do_finalize(desc,
1091 -                       (sha256_block_fn *)sha256_block_data_order_neon);
1092 +               sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
1093 +       sha256_base_do_finalize(desc, sha256_block_data_order_neon);
1094         kernel_neon_end();
1095  
1096         return sha256_base_finish(desc, out);
1097 diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
1098 index 269a394..c7a91f1 100644
1099 --- a/arch/arm/crypto/sha512-glue.c
1100 +++ b/arch/arm/crypto/sha512-glue.c
1101 @@ -28,27 +28,24 @@ MODULE_ALIAS_CRYPTO("sha512");
1102  MODULE_ALIAS_CRYPTO("sha384-arm");
1103  MODULE_ALIAS_CRYPTO("sha512-arm");
1104  
1105 -asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
1106 +asmlinkage void sha512_block_data_order(struct sha512_state *state, u8 const *src, int blocks);
1107  
1108  int sha512_arm_update(struct shash_desc *desc, const u8 *data,
1109                       unsigned int len)
1110  {
1111 -       return sha512_base_do_update(desc, data, len,
1112 -               (sha512_block_fn *)sha512_block_data_order);
1113 +       return sha512_base_do_update(desc, data, len, sha512_block_data_order);
1114  }
1115  
1116  int sha512_arm_final(struct shash_desc *desc, u8 *out)
1117  {
1118 -       sha512_base_do_finalize(desc,
1119 -               (sha512_block_fn *)sha512_block_data_order);
1120 +       sha512_base_do_finalize(desc, sha512_block_data_order);
1121         return sha512_base_finish(desc, out);
1122  }
1123  
1124  int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
1125                      unsigned int len, u8 *out)
1126  {
1127 -       sha512_base_do_update(desc, data, len,
1128 -               (sha512_block_fn *)sha512_block_data_order);
1129 +       sha512_base_do_update(desc, data, len, sha512_block_data_order);
1130         return sha512_arm_final(desc, out);
1131  }
1132  
1133 diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
1134 index 3269368..9fcbc00 100644
1135 --- a/arch/arm/crypto/sha512-neon-glue.c
1136 +++ b/arch/arm/crypto/sha512-neon-glue.c
1137 @@ -22,7 +22,7 @@
1138  MODULE_ALIAS_CRYPTO("sha384-neon");
1139  MODULE_ALIAS_CRYPTO("sha512-neon");
1140  
1141 -asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src,
1142 +asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, u8 const *src,
1143                                              int blocks);
1144  
1145  static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
1146 @@ -35,8 +35,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
1147                 return sha512_arm_update(desc, data, len);
1148  
1149         kernel_neon_begin();
1150 -       sha512_base_do_update(desc, data, len,
1151 -               (sha512_block_fn *)sha512_block_data_order_neon);
1152 +       sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
1153         kernel_neon_end();
1154  
1155         return 0;
1156 @@ -50,10 +49,8 @@ static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
1157  
1158         kernel_neon_begin();
1159         if (len)
1160 -               sha512_base_do_update(desc, data, len,
1161 -                       (sha512_block_fn *)sha512_block_data_order_neon);
1162 -       sha512_base_do_finalize(desc,
1163 -               (sha512_block_fn *)sha512_block_data_order_neon);
1164 +               sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
1165 +       sha512_base_do_finalize(desc, sha512_block_data_order_neon);
1166         kernel_neon_end();
1167  
1168         return sha512_base_finish(desc, out);
1169 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
1170 index 66d0e21..8fa3237 100644
1171 --- a/arch/arm/include/asm/atomic.h
1172 +++ b/arch/arm/include/asm/atomic.h
1173 @@ -18,17 +18,41 @@
1174  #include <asm/barrier.h>
1175  #include <asm/cmpxchg.h>
1176  
1177 +#ifdef CONFIG_GENERIC_ATOMIC64
1178 +#include <asm-generic/atomic64.h>
1179 +#endif
1180 +
1181  #define ATOMIC_INIT(i) { (i) }
1182  
1183  #ifdef __KERNEL__
1184  
1185 +#ifdef CONFIG_THUMB2_KERNEL
1186 +#define REFCOUNT_TRAP_INSN "bkpt       0xf1"
1187 +#else
1188 +#define REFCOUNT_TRAP_INSN "bkpt       0xf103"
1189 +#endif
1190 +
1191 +#define _ASM_EXTABLE(from, to)         \
1192 +"      .pushsection __ex_table,\"a\"\n"\
1193 +"      .align  3\n"                    \
1194 +"      .long   " #from ", " #to"\n"    \
1195 +"      .popsection"
1196 +
1197  /*
1198   * On ARM, ordinary assignment (str instruction) doesn't clear the local
1199   * strex/ldrex monitor on some implementations. The reason we can use it for
1200   * atomic_set() is the clrex or dummy strex done on every exception return.
1201   */
1202  #define atomic_read(v) READ_ONCE((v)->counter)
1203 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1204 +{
1205 +       return READ_ONCE(v->counter);
1206 +}
1207  #define atomic_set(v,i)        WRITE_ONCE(((v)->counter), (i))
1208 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1209 +{
1210 +       WRITE_ONCE(v->counter, i);
1211 +}
1212  
1213  #if __LINUX_ARM_ARCH__ >= 6
1214  
1215 @@ -38,45 +62,74 @@
1216   * to ensure that the update happens.
1217   */
1218  
1219 -#define ATOMIC_OP(op, c_op, asm_op)                                    \
1220 -static inline void atomic_##op(int i, atomic_t *v)                     \
1221 +#ifdef CONFIG_PAX_REFCOUNT
1222 +#define __OVERFLOW_POST                        \
1223 +       "       bvc     3f\n"           \
1224 +       "2:     " REFCOUNT_TRAP_INSN "\n"\
1225 +       "3:\n"
1226 +#define __OVERFLOW_POST_RETURN         \
1227 +       "       bvc     3f\n"           \
1228 +       "       mov     %1, %0\n"       \
1229 +       "2:     " REFCOUNT_TRAP_INSN "\n"\
1230 +       "3:\n"
1231 +#define __OVERFLOW_EXTABLE             \
1232 +       "4:\n"                          \
1233 +       _ASM_EXTABLE(2b, 4b)
1234 +#else
1235 +#define __OVERFLOW_POST
1236 +#define __OVERFLOW_POST_RETURN
1237 +#define __OVERFLOW_EXTABLE
1238 +#endif
1239 +
1240 +#define __ATOMIC_OP(op, suffix, c_op, asm_op)                          \
1241 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)   \
1242  {                                                                      \
1243         unsigned long tmp;                                              \
1244         int result;                                                     \
1245                                                                         \
1246         prefetchw(&v->counter);                                         \
1247 -       __asm__ __volatile__("@ atomic_" #op "\n"                       \
1248 +       __asm__ __volatile__("@ atomic_" #op #suffix "\n"               \
1249  "1:    ldrex   %0, [%3]\n"                                             \
1250  "      " #asm_op "     %0, %0, %4\n"                                   \
1251 +       __OVERFLOW_POST                                                 \
1252  "      strex   %1, %0, [%3]\n"                                         \
1253  "      teq     %1, #0\n"                                               \
1254 -"      bne     1b"                                                     \
1255 +"      bne     1b\n"                                                   \
1256 +       __OVERFLOW_EXTABLE                                              \
1257         : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
1258         : "r" (&v->counter), "Ir" (i)                                   \
1259         : "cc");                                                        \
1260  }                                                                      \
1261  
1262 -#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
1263 -static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)     \
1264 +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op)\
1265 +                                   __ATOMIC_OP(op, , c_op, asm_op##s)
1266 +
1267 +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op)                   \
1268 +static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
1269  {                                                                      \
1270 -       unsigned long tmp;                                              \
1271 +       int tmp;                                                        \
1272         int result;                                                     \
1273                                                                         \
1274         prefetchw(&v->counter);                                         \
1275                                                                         \
1276 -       __asm__ __volatile__("@ atomic_" #op "_return\n"                \
1277 +       __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n"     \
1278  "1:    ldrex   %0, [%3]\n"                                             \
1279 -"      " #asm_op "     %0, %0, %4\n"                                   \
1280 -"      strex   %1, %0, [%3]\n"                                         \
1281 -"      teq     %1, #0\n"                                               \
1282 -"      bne     1b"                                                     \
1283 -       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
1284 +"      " #asm_op "     %1, %0, %4\n"                                   \
1285 +       __OVERFLOW_POST_RETURN                                          \
1286 +"      strex   %0, %1, [%3]\n"                                         \
1287 +"      teq     %0, #0\n"                                               \
1288 +"      bne     1b\n"                                                   \
1289 +       __OVERFLOW_EXTABLE                                              \
1290 +       : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)               \
1291         : "r" (&v->counter), "Ir" (i)                                   \
1292         : "cc");                                                        \
1293                                                                         \
1294         return result;                                                  \
1295  }
1296  
1297 +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)\
1298 +                                          __ATOMIC_OP_RETURN(op, , c_op, asm_op##s)
1299 +
1300  #define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
1301  static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)      \
1302  {                                                                      \
1303 @@ -99,6 +152,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)    \
1304  }
1305  
1306  #define atomic_add_return_relaxed      atomic_add_return_relaxed
1307 +#define atomic_add_return_unchecked_relaxed    atomic_add_return_unchecked_relaxed
1308  #define atomic_sub_return_relaxed      atomic_sub_return_relaxed
1309  #define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
1310  #define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
1311 @@ -141,12 +195,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1312         __asm__ __volatile__ ("@ atomic_add_unless\n"
1313  "1:    ldrex   %0, [%4]\n"
1314  "      teq     %0, %5\n"
1315 -"      beq     2f\n"
1316 -"      add     %1, %0, %6\n"
1317 +"      beq     4f\n"
1318 +"      adds    %1, %0, %6\n"
1319 +
1320 +       __OVERFLOW_POST
1321 +
1322  "      strex   %2, %1, [%4]\n"
1323  "      teq     %2, #0\n"
1324  "      bne     1b\n"
1325 -"2:"
1326 +
1327 +       __OVERFLOW_EXTABLE
1328 +
1329         : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1330         : "r" (&v->counter), "r" (u), "r" (a)
1331         : "cc");
1332 @@ -157,14 +216,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1333         return oldval;
1334  }
1335  
1336 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1337 +{
1338 +       unsigned long oldval, res;
1339 +
1340 +       smp_mb();
1341 +
1342 +       do {
1343 +               __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1344 +               "ldrex  %1, [%3]\n"
1345 +               "mov    %0, #0\n"
1346 +               "teq    %1, %4\n"
1347 +               "strexeq %0, %5, [%3]\n"
1348 +                   : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1349 +                   : "r" (&ptr->counter), "Ir" (old), "r" (new)
1350 +                   : "cc");
1351 +       } while (res);
1352 +
1353 +       smp_mb();
1354 +
1355 +       return oldval;
1356 +}
1357 +
1358  #else /* ARM_ARCH_6 */
1359  
1360  #ifdef CONFIG_SMP
1361  #error SMP not supported on pre-ARMv6 CPUs
1362  #endif
1363  
1364 -#define ATOMIC_OP(op, c_op, asm_op)                                    \
1365 -static inline void atomic_##op(int i, atomic_t *v)                     \
1366 +#define __ATOMIC_OP(op, suffix, c_op, asm_op)                          \
1367 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)   \
1368  {                                                                      \
1369         unsigned long flags;                                            \
1370                                                                         \
1371 @@ -173,8 +254,11 @@ static inline void atomic_##op(int i, atomic_t *v)                 \
1372         raw_local_irq_restore(flags);                                   \
1373  }                                                                      \
1374  
1375 -#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
1376 -static inline int atomic_##op##_return(int i, atomic_t *v)             \
1377 +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op)    \
1378 +                                   __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1379 +
1380 +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op)                   \
1381 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1382  {                                                                      \
1383         unsigned long flags;                                            \
1384         int val;                                                        \
1385 @@ -201,6 +285,9 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                     \
1386         return val;                                                     \
1387  }
1388  
1389 +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1390 +                                          __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1391 +
1392  static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1393  {
1394         int ret;
1395 @@ -215,6 +302,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1396         return ret;
1397  }
1398  
1399 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1400 +{
1401 +       return atomic_cmpxchg((atomic_t *)v, old, new);
1402 +}
1403 +
1404  static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1405  {
1406         int c, old;
1407 @@ -250,16 +342,29 @@ ATOMIC_OPS(xor, ^=, eor)
1408  #undef ATOMIC_OPS
1409  #undef ATOMIC_FETCH_OP
1410  #undef ATOMIC_OP_RETURN
1411 +#undef __ATOMIC_OP_RETURN
1412  #undef ATOMIC_OP
1413 +#undef __ATOMIC_OP
1414  
1415  #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1416 +#define atomic_xchg_unchecked(v, new) (xchg_unchecked(&((v)->counter), new))
1417  
1418  #define atomic_inc(v)          atomic_add(1, v)
1419 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1420 +{
1421 +       atomic_add_unchecked(1, v);
1422 +}
1423  #define atomic_dec(v)          atomic_sub(1, v)
1424 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1425 +{
1426 +       atomic_sub_unchecked(1, v);
1427 +}
1428  
1429  #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1430 +#define atomic_inc_and_test_unchecked(v)       (atomic_add_return_unchecked(1, v) == 0)
1431  #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1432  #define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
1433 +#define atomic_inc_return_unchecked_relaxed(v)    (atomic_add_return_unchecked_relaxed(1, v))
1434  #define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
1435  #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1436  
1437 @@ -270,6 +375,14 @@ typedef struct {
1438         long long counter;
1439  } atomic64_t;
1440  
1441 +#ifdef CONFIG_PAX_REFCOUNT
1442 +typedef struct {
1443 +       long long counter;
1444 +} atomic64_unchecked_t;
1445 +#else
1446 +typedef atomic64_t atomic64_unchecked_t;
1447 +#endif
1448 +
1449  #define ATOMIC64_INIT(i) { (i) }
1450  
1451  #ifdef CONFIG_ARM_LPAE
1452 @@ -286,6 +399,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1453         return result;
1454  }
1455  
1456 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1457 +{
1458 +       long long result;
1459 +
1460 +       __asm__ __volatile__("@ atomic64_read_unchecked\n"
1461 +"      ldrd    %0, %H0, [%1]"
1462 +       : "=&r" (result)
1463 +       : "r" (&v->counter), "Qo" (v->counter)
1464 +       );
1465 +
1466 +       return result;
1467 +}
1468 +
1469  static inline void atomic64_set(atomic64_t *v, long long i)
1470  {
1471         __asm__ __volatile__("@ atomic64_set\n"
1472 @@ -294,6 +420,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1473         : "r" (&v->counter), "r" (i)
1474         );
1475  }
1476 +
1477 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1478 +{
1479 +       __asm__ __volatile__("@ atomic64_set_unchecked\n"
1480 +"      strd    %2, %H2, [%1]"
1481 +       : "=Qo" (v->counter)
1482 +       : "r" (&v->counter), "r" (i)
1483 +       );
1484 +}
1485  #else
1486  static inline long long atomic64_read(const atomic64_t *v)
1487  {
1488 @@ -308,6 +443,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1489         return result;
1490  }
1491  
1492 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1493 +{
1494 +       long long result;
1495 +
1496 +       __asm__ __volatile__("@ atomic64_read_unchecked\n"
1497 +"      ldrexd  %0, %H0, [%1]"
1498 +       : "=&r" (result)
1499 +       : "r" (&v->counter), "Qo" (v->counter)
1500 +       );
1501 +
1502 +       return result;
1503 +}
1504 +
1505  static inline void atomic64_set(atomic64_t *v, long long i)
1506  {
1507         long long tmp;
1508 @@ -322,50 +470,82 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1509         : "r" (&v->counter), "r" (i)
1510         : "cc");
1511  }
1512 +
1513 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1514 +{
1515 +       long long tmp;
1516 +
1517 +       prefetchw(&v->counter);
1518 +       __asm__ __volatile__("@ atomic64_set_unchecked\n"
1519 +"1:    ldrexd  %0, %H0, [%2]\n"
1520 +"      strexd  %0, %3, %H3, [%2]\n"
1521 +"      teq     %0, #0\n"
1522 +"      bne     1b"
1523 +       : "=&r" (tmp), "=Qo" (v->counter)
1524 +       : "r" (&v->counter), "r" (i)
1525 +       : "cc");
1526 +}
1527  #endif
1528  
1529 -#define ATOMIC64_OP(op, op1, op2)                                      \
1530 -static inline void atomic64_##op(long long i, atomic64_t *v)           \
1531 +#define __OVERFLOW_POST_RETURN64       \
1532 +       "       bvc     3f\n"           \
1533 +"      mov     %Q1, %Q0\n"             \
1534 +"      mov     %R1, %R0\n"             \
1535 +       "2:     " REFCOUNT_TRAP_INSN "\n"\
1536 +       "3:\n"
1537 +
1538 +#define __ATOMIC64_OP(op, suffix, op1, op2)                            \
1539 +static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1540  {                                                                      \
1541         long long result;                                               \
1542         unsigned long tmp;                                              \
1543                                                                         \
1544         prefetchw(&v->counter);                                         \
1545 -       __asm__ __volatile__("@ atomic64_" #op "\n"                     \
1546 +       __asm__ __volatile__("@ atomic64_" #op #suffix "\n"             \
1547  "1:    ldrexd  %0, %H0, [%3]\n"                                        \
1548  "      " #op1 " %Q0, %Q0, %Q4\n"                                       \
1549  "      " #op2 " %R0, %R0, %R4\n"                                       \
1550 +       __OVERFLOW_POST                                                 \
1551  "      strexd  %1, %0, %H0, [%3]\n"                                    \
1552  "      teq     %1, #0\n"                                               \
1553 -"      bne     1b"                                                     \
1554 +"      bne     1b\n"                                                   \
1555 +       __OVERFLOW_EXTABLE                                              \
1556         : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
1557         : "r" (&v->counter), "r" (i)                                    \
1558         : "cc");                                                        \
1559  }                                                                      \
1560  
1561 -#define ATOMIC64_OP_RETURN(op, op1, op2)                               \
1562 +#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2) \
1563 +                                 __ATOMIC64_OP(op, , op1, op2##s)
1564 +
1565 +#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2)                     \
1566  static inline long long                                                        \
1567 -atomic64_##op##_return_relaxed(long long i, atomic64_t *v)             \
1568 +atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
1569  {                                                                      \
1570         long long result;                                               \
1571 -       unsigned long tmp;                                              \
1572 +       long long tmp;                                                  \
1573                                                                         \
1574         prefetchw(&v->counter);                                         \
1575                                                                         \
1576 -       __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
1577 +       __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n"   \
1578  "1:    ldrexd  %0, %H0, [%3]\n"                                        \
1579 -"      " #op1 " %Q0, %Q0, %Q4\n"                                       \
1580 -"      " #op2 " %R0, %R0, %R4\n"                                       \
1581 -"      strexd  %1, %0, %H0, [%3]\n"                                    \
1582 -"      teq     %1, #0\n"                                               \
1583 -"      bne     1b"                                                     \
1584 -       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
1585 +"      " #op1 " %Q1, %Q0, %Q4\n"                                       \
1586 +"      " #op2 " %R1, %R0, %R4\n"                                       \
1587 +       __OVERFLOW_POST_RETURN64                                        \
1588 +"      strexd  %0, %1, %H1, [%3]\n"                                    \
1589 +"      teq     %0, #0\n"                                               \
1590 +"      bne     1b\n"                                                   \
1591 +       __OVERFLOW_EXTABLE                                              \
1592 +       : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)               \
1593         : "r" (&v->counter), "r" (i)                                    \
1594         : "cc");                                                        \
1595                                                                         \
1596         return result;                                                  \
1597  }
1598  
1599 +#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2) \
1600 +                                        __ATOMIC64_OP_RETURN(op, , op1, op2##s)
1601 +
1602  #define ATOMIC64_FETCH_OP(op, op1, op2)                                        \
1603  static inline long long                                                        \
1604  atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)              \
1605 @@ -398,6 +578,7 @@ ATOMIC64_OPS(add, adds, adc)
1606  ATOMIC64_OPS(sub, subs, sbc)
1607  
1608  #define atomic64_add_return_relaxed    atomic64_add_return_relaxed
1609 +#define atomic64_add_return_unchecked_relaxed  atomic64_add_return_unchecked_relaxed
1610  #define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
1611  #define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
1612  #define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
1613 @@ -422,7 +603,10 @@ ATOMIC64_OPS(xor, eor, eor)
1614  #undef ATOMIC64_OPS
1615  #undef ATOMIC64_FETCH_OP
1616  #undef ATOMIC64_OP_RETURN
1617 +#undef __ATOMIC64_OP_RETURN
1618  #undef ATOMIC64_OP
1619 +#undef __ATOMIC64_OP
1620 +#undef __OVERFLOW_POST_RETURN
1621  
1622  static inline long long
1623  atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
1624 @@ -448,6 +632,13 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
1625  }
1626  #define atomic64_cmpxchg_relaxed       atomic64_cmpxchg_relaxed
1627  
1628 +static inline long long
1629 +atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old, long long new)
1630 +{
1631 +       return atomic64_cmpxchg_relaxed((atomic64_t *)ptr, old, new);
1632 +}
1633 +#define atomic64_cmpxchg_unchecked_relaxed     atomic64_cmpxchg_unchecked_relaxed
1634 +
1635  static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
1636  {
1637         long long result;
1638 @@ -468,25 +659,36 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
1639  }
1640  #define atomic64_xchg_relaxed          atomic64_xchg_relaxed
1641  
1642 +static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
1643 +{
1644 +       return atomic64_xchg_relaxed((atomic64_t *)ptr, new);
1645 +}
1646 +#define atomic64_xchg_unchecked_relaxed                atomic64_xchg_unchecked_relaxed
1647 +
1648  static inline long long atomic64_dec_if_positive(atomic64_t *v)
1649  {
1650         long long result;
1651 -       unsigned long tmp;
1652 +       u64 tmp;
1653  
1654         smp_mb();
1655         prefetchw(&v->counter);
1656  
1657         __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1658  "1:    ldrexd  %0, %H0, [%3]\n"
1659 -"      subs    %Q0, %Q0, #1\n"
1660 -"      sbc     %R0, %R0, #0\n"
1661 -"      teq     %R0, #0\n"
1662 -"      bmi     2f\n"
1663 -"      strexd  %1, %0, %H0, [%3]\n"
1664 -"      teq     %1, #0\n"
1665 +"      subs    %Q1, %Q0, #1\n"
1666 +"      sbcs    %R1, %R0, #0\n"
1667 +
1668 +       __OVERFLOW_POST_RETURN64
1669 +
1670 +"      teq     %R1, #0\n"
1671 +"      bmi     4f\n"
1672 +"      strexd  %0, %1, %H1, [%3]\n"
1673 +"      teq     %0, #0\n"
1674  "      bne     1b\n"
1675 -"2:"
1676 -       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1677 +
1678 +       __OVERFLOW_EXTABLE
1679 +
1680 +       : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)
1681         : "r" (&v->counter)
1682         : "cc");
1683  
1684 @@ -509,13 +711,18 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1685  "      teq     %0, %5\n"
1686  "      teqeq   %H0, %H5\n"
1687  "      moveq   %1, #0\n"
1688 -"      beq     2f\n"
1689 +"      beq     4f\n"
1690  "      adds    %Q0, %Q0, %Q6\n"
1691 -"      adc     %R0, %R0, %R6\n"
1692 +"      adcs    %R0, %R0, %R6\n"
1693 +
1694 +       __OVERFLOW_POST
1695 +
1696  "      strexd  %2, %0, %H0, [%4]\n"
1697  "      teq     %2, #0\n"
1698  "      bne     1b\n"
1699 -"2:"
1700 +
1701 +       __OVERFLOW_EXTABLE
1702 +
1703         : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1704         : "r" (&v->counter), "r" (u), "r" (a)
1705         : "cc");
1706 @@ -526,12 +733,19 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1707         return ret;
1708  }
1709  
1710 +#undef __OVERFLOW_EXTABLE
1711 +#undef __OVERFLOW_POST_RETURN64
1712 +#undef __OVERFLOW_POST
1713 +
1714  #define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
1715  #define atomic64_inc(v)                        atomic64_add(1LL, (v))
1716 +#define atomic64_inc_unchecked(v)      atomic64_add_unchecked(1LL, (v))
1717  #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
1718 +#define atomic64_inc_return_unchecked_relaxed(v)       atomic64_add_return_unchecked_relaxed(1LL, (v))
1719  #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
1720  #define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
1721  #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
1722 +#define atomic64_dec_unchecked(v)      atomic64_sub_unchecked(1LL, (v))
1723  #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
1724  #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
1725  #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
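
/*
 * Illustrative sketch, not part of the patch: the atomic.h hunks above split
 * the ARM atomics into a checked atomic_t, whose add/sub variants use
 * "adds"/"adcs" followed by an overflow trap (__OVERFLOW_POST plus the
 * extable entry), and an atomic_unchecked_t that keeps the old wrapping
 * behaviour for counters where wrap-around is harmless.  A minimal
 * user-space C analogue of that split, using the GCC/Clang
 * __builtin_add_overflow() helper in place of the "bvc" + trap sequence:
 */
#include <stdio.h>
#include <stdlib.h>

/* checked counter: dies on signed overflow, like a PAX_REFCOUNT trap */
static int checked_inc(int *counter)
{
        int res;

        if (__builtin_add_overflow(*counter, 1, &res)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();                /* stands in for REFCOUNT_TRAP_INSN */
        }
        *counter = res;
        return res;
}

/* unchecked counter: plain increment, no overflow check */
static int unchecked_inc(int *counter)
{
        return ++*counter;
}

int main(void)
{
        int stats = 0;                  /* e.g. a statistics counter */
        int refs = 0x7fffffff;          /* a reference count at INT_MAX */

        printf("stats = %d\n", unchecked_inc(&stats));
        checked_inc(&refs);             /* traps (aborts) here */
        return 0;
}
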
1726 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1727 index 75fe66b..2255c86 100644
1728 --- a/arch/arm/include/asm/cache.h
1729 +++ b/arch/arm/include/asm/cache.h
1730 @@ -4,8 +4,10 @@
1731  #ifndef __ASMARM_CACHE_H
1732  #define __ASMARM_CACHE_H
1733  
1734 +#include <linux/const.h>
1735 +
1736  #define L1_CACHE_SHIFT         CONFIG_ARM_L1_CACHE_SHIFT
1737 -#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
1738 +#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
1739  
1740  /*
1741   * Memory returned by kmalloc() may be used for DMA, so we must make
1742 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1743 index bdd283b..e66fb83 100644
1744 --- a/arch/arm/include/asm/cacheflush.h
1745 +++ b/arch/arm/include/asm/cacheflush.h
1746 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1747         void (*dma_unmap_area)(const void *, size_t, int);
1748  
1749         void (*dma_flush_range)(const void *, const void *);
1750 -};
1751 +} __no_const __no_randomize_layout;
1752  
1753  /*
1754   * Select the calling method
1755 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1756 index 524692f..a8871ec 100644
1757 --- a/arch/arm/include/asm/checksum.h
1758 +++ b/arch/arm/include/asm/checksum.h
1759 @@ -37,7 +37,19 @@ __wsum
1760  csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1761  
1762  __wsum
1763 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1764 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1765 +
1766 +static inline __wsum
1767 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1768 +{
1769 +       __wsum ret;
1770 +       pax_open_userland();
1771 +       ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1772 +       pax_close_userland();
1773 +       return ret;
1774 +}
1775 +
1776 +
1777  
1778  /*
1779   *     Fold a partial checksum without adding pseudo headers
1780 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1781 index 97882f9..ff9d6ac 100644
1782 --- a/arch/arm/include/asm/cmpxchg.h
1783 +++ b/arch/arm/include/asm/cmpxchg.h
1784 @@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1785         (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),           \
1786                                    sizeof(*(ptr)));                     \
1787  })
1788 +#define xchg_unchecked_relaxed(ptr, x) ({                              \
1789 +       (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),           \
1790 +                                  sizeof(*(ptr)));                     \
1791 +})
1792  
1793  #include <asm-generic/cmpxchg-local.h>
1794  
1795 @@ -128,6 +132,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1796  #endif
1797  
1798  #define xchg xchg_relaxed
1799 +#define xchg_unchecked xchg_unchecked_relaxed
1800  
1801  /*
1802   * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
1803 diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
1804 index baefe1d..29cb35a 100644
1805 --- a/arch/arm/include/asm/cpuidle.h
1806 +++ b/arch/arm/include/asm/cpuidle.h
1807 @@ -32,7 +32,7 @@ struct device_node;
1808  struct cpuidle_ops {
1809         int (*suspend)(unsigned long arg);
1810         int (*init)(struct device_node *, int cpu);
1811 -};
1812 +} __no_const;
1813  
1814  struct of_cpuidle_method {
1815         const char *method;
1816 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1817 index 99d9f63..ec44cb5 100644
1818 --- a/arch/arm/include/asm/domain.h
1819 +++ b/arch/arm/include/asm/domain.h
1820 @@ -42,7 +42,6 @@
1821  #define DOMAIN_USER    1
1822  #define DOMAIN_IO      0
1823  #endif
1824 -#define DOMAIN_VECTORS 3
1825  
1826  /*
1827   * Domain types
1828 @@ -51,9 +50,28 @@
1829  #define DOMAIN_CLIENT  1
1830  #ifdef CONFIG_CPU_USE_DOMAINS
1831  #define DOMAIN_MANAGER 3
1832 +#define DOMAIN_VECTORS 3
1833 +#define DOMAIN_USERCLIENT      DOMAIN_CLIENT
1834  #else
1835 +
1836 +#ifdef CONFIG_PAX_KERNEXEC
1837  #define DOMAIN_MANAGER 1
1838 +#define DOMAIN_KERNEXEC        3
1839 +#else
1840 +#define DOMAIN_MANAGER 1
1841 +#endif
1842 +
1843 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1844 +#define DOMAIN_USERCLIENT      0
1845 +#define DOMAIN_UDEREF          1
1846 +#define DOMAIN_VECTORS         DOMAIN_KERNEL
1847 +#else
1848 +#define DOMAIN_USERCLIENT      1
1849 +#define DOMAIN_VECTORS         DOMAIN_USER
1850 +#endif
1851 +
1852  #endif
1853 +#define DOMAIN_KERNELCLIENT    1
1854  
1855  #define domain_mask(dom)       ((3) << (2 * (dom)))
1856  #define domain_val(dom,type)   ((type) << (2 * (dom)))
1857 @@ -62,13 +80,19 @@
1858  #define DACR_INIT \
1859         (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
1860          domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
1861 -        domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
1862 +        domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
1863          domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
1864 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
1865 +       /* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
1866 +#define DACR_INIT \
1867 +       (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
1868 +        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
1869 +        domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
1870  #else
1871  #define DACR_INIT \
1872 -       (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
1873 +       (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
1874          domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
1875 -        domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
1876 +        domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
1877          domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
1878  #endif
1879  
1880 @@ -124,6 +148,17 @@ static inline void set_domain(unsigned val)
1881                 set_domain(domain);                             \
1882         } while (0)
1883  
1884 +#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1885 +#define modify_domain(dom,type)                                        \
1886 +       do {                                                    \
1887 +               struct thread_info *thread = current_thread_info(); \
1888 +               unsigned int domain = get_domain();             \
1889 +               domain &= ~domain_mask(dom);                    \
1890 +               domain = domain | domain_val(dom, type);        \
1891 +               thread->cpu_domain = domain;                    \
1892 +               set_domain(domain);                             \
1893 +       } while (0)
1894 +
1895  #else
1896  static inline void modify_domain(unsigned dom, unsigned type)  { }
1897  #endif
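
/*
 * Illustrative sketch, not part of the patch: the domain.h hunk above routes
 * kernel-side mappings through DOMAIN_KERNELCLIENT and reserves manager
 * rights for the short pax_open_kernel() windows.  Each ARM domain is a
 * 2-bit field in the DACR, and the domain_mask()/domain_val() macros in the
 * hunk just shift a 2-bit access type into place.  A stand-alone C rendering
 * of that arithmetic (access types follow the ARM architecture: 0 = no
 * access, 1 = client, 3 = manager; the domain numbers below are hypothetical
 * and chosen only for the printout):
 */
#include <stdio.h>

#define DOMAIN_NOACCESS 0u
#define DOMAIN_CLIENT   1u
#define DOMAIN_MANAGER  3u

/* same shape as domain_mask()/domain_val() in the patched header */
static unsigned int domain_mask(unsigned int dom)
{
        return 3u << (2 * dom);
}

static unsigned int domain_val(unsigned int dom, unsigned int type)
{
        return type << (2 * dom);
}

int main(void)
{
        unsigned int dom_kernel = 0, dom_user = 1, dom_io = 2;
        unsigned int dacr = 0;

        dacr |= domain_val(dom_kernel, DOMAIN_CLIENT);  /* kernel as client */
        dacr |= domain_val(dom_user, DOMAIN_NOACCESS);  /* user shut out    */
        dacr |= domain_val(dom_io, DOMAIN_CLIENT);

        printf("DACR = 0x%08x (kernel field mask 0x%08x)\n",
               dacr, domain_mask(dom_kernel));
        return 0;
}
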
1898 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1899 index d2315ff..f60b47b 100644
1900 --- a/arch/arm/include/asm/elf.h
1901 +++ b/arch/arm/include/asm/elf.h
1902 @@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1903     the loader.  We need to make sure that it is out of the way of the program
1904     that it will "exec", and that there is sufficient room for the brk.  */
1905  
1906 -#define ELF_ET_DYN_BASE        (TASK_SIZE / 3 * 2)
1907 +#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
1908 +
1909 +#ifdef CONFIG_PAX_ASLR
1910 +#define PAX_ELF_ET_DYN_BASE    0x00008000UL
1911 +
1912 +#define PAX_DELTA_MMAP_LEN     ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1913 +#define PAX_DELTA_STACK_LEN    ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1914 +#endif
1915  
1916  /* When the program starts, a1 contains a pointer to a function to be 
1917     registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
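
/*
 * Illustrative sketch, not part of the patch: the elf.h hunk supplies the
 * ASLR inputs PaX uses on ARM.  Assuming, as on other PaX architectures,
 * that the PAX_DELTA_*_LEN values count randomised page-granularity bits,
 * the resulting randomisation span is simply (1 << bits) pages:
 */
#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;   /* typical ARM page size   */
        const unsigned int lens[] = { 16, 10 }; /* the two values in the hunk */

        for (unsigned int i = 0; i < 2; i++) {
                unsigned long span = (1UL << lens[i]) * page_size;

                printf("%2u random bits -> %lu MiB of mmap/stack jitter\n",
                       lens[i], span >> 20);
        }
        return 0;
}
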
1918 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1919 index de53547..52b9a28 100644
1920 --- a/arch/arm/include/asm/fncpy.h
1921 +++ b/arch/arm/include/asm/fncpy.h
1922 @@ -81,7 +81,9 @@
1923         BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||             \
1924                 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1925                                                                         \
1926 +       pax_open_kernel();                                              \
1927         memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);   \
1928 +       pax_close_kernel();                                             \
1929         flush_icache_range((unsigned long)(dest_buf),                   \
1930                 (unsigned long)(dest_buf) + (size));                    \
1931                                                                         \
1932 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1933 index 6795368..6c4d749 100644
1934 --- a/arch/arm/include/asm/futex.h
1935 +++ b/arch/arm/include/asm/futex.h
1936 @@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1937                 return -EFAULT;
1938  
1939         preempt_disable();
1940 +
1941         __ua_flags = uaccess_save_and_enable();
1942         __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1943         "1:     " TUSER(ldr) "  %1, [%4]\n"
1944 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1945 index 83eb2f7..ed77159 100644
1946 --- a/arch/arm/include/asm/kmap_types.h
1947 +++ b/arch/arm/include/asm/kmap_types.h
1948 @@ -4,6 +4,6 @@
1949  /*
1950   * This is the "bare minimum".  AIO seems to require this.
1951   */
1952 -#define KM_TYPE_NR 16
1953 +#define KM_TYPE_NR 17
1954  
1955  #endif
1956 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1957 index 9e614a1..3302cca 100644
1958 --- a/arch/arm/include/asm/mach/dma.h
1959 +++ b/arch/arm/include/asm/mach/dma.h
1960 @@ -22,7 +22,7 @@ struct dma_ops {
1961         int     (*residue)(unsigned int, dma_t *);              /* optional */
1962         int     (*setspeed)(unsigned int, dma_t *, int);        /* optional */
1963         const char *type;
1964 -};
1965 +} __do_const;
1966  
1967  struct dma_struct {
1968         void            *addr;          /* single DMA address           */
1969 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1970 index 9b7c328..2dfe68b 100644
1971 --- a/arch/arm/include/asm/mach/map.h
1972 +++ b/arch/arm/include/asm/mach/map.h
1973 @@ -23,17 +23,19 @@ struct map_desc {
1974  
1975  /* types 0-3 are defined in asm/io.h */
1976  enum {
1977 -       MT_UNCACHED = 4,
1978 -       MT_CACHECLEAN,
1979 -       MT_MINICLEAN,
1980 +       MT_UNCACHED_RW = 4,
1981 +       MT_CACHECLEAN_RO,
1982 +       MT_MINICLEAN_RO,
1983         MT_LOW_VECTORS,
1984         MT_HIGH_VECTORS,
1985 -       MT_MEMORY_RWX,
1986 +       __MT_MEMORY_RWX,
1987         MT_MEMORY_RW,
1988 -       MT_ROM,
1989 -       MT_MEMORY_RWX_NONCACHED,
1990 +       MT_MEMORY_RX,
1991 +       MT_ROM_RX,
1992 +       MT_MEMORY_RW_NONCACHED,
1993 +       MT_MEMORY_RX_NONCACHED,
1994         MT_MEMORY_RW_DTCM,
1995 -       MT_MEMORY_RWX_ITCM,
1996 +       MT_MEMORY_RX_ITCM,
1997         MT_MEMORY_RW_SO,
1998         MT_MEMORY_DMA_READY,
1999  };
2000 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
2001 index c2bf24f..69e437c 100644
2002 --- a/arch/arm/include/asm/outercache.h
2003 +++ b/arch/arm/include/asm/outercache.h
2004 @@ -39,7 +39,7 @@ struct outer_cache_fns {
2005         /* This is an ARM L2C thing */
2006         void (*write_sec)(unsigned long, unsigned);
2007         void (*configure)(const struct l2x0_regs *);
2008 -};
2009 +} __no_const;
2010  
2011  extern struct outer_cache_fns outer_cache;
2012  
2013 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
2014 index 4355f0e..cd9168e 100644
2015 --- a/arch/arm/include/asm/page.h
2016 +++ b/arch/arm/include/asm/page.h
2017 @@ -23,6 +23,7 @@
2018  
2019  #else
2020  
2021 +#include <linux/compiler.h>
2022  #include <asm/glue.h>
2023  
2024  /*
2025 @@ -114,7 +115,7 @@ struct cpu_user_fns {
2026         void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
2027         void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
2028                         unsigned long vaddr, struct vm_area_struct *vma);
2029 -};
2030 +} __no_const;
2031  
2032  #ifdef MULTI_USER
2033  extern struct cpu_user_fns cpu_user;
2034 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2035 index b2902a5..da11e4d 100644
2036 --- a/arch/arm/include/asm/pgalloc.h
2037 +++ b/arch/arm/include/asm/pgalloc.h
2038 @@ -17,6 +17,7 @@
2039  #include <asm/processor.h>
2040  #include <asm/cacheflush.h>
2041  #include <asm/tlbflush.h>
2042 +#include <asm/system_info.h>
2043  
2044  #define check_pgt_cache()              do { } while (0)
2045  
2046 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2047         set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2048  }
2049  
2050 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2051 +{
2052 +       pud_populate(mm, pud, pmd);
2053 +}
2054 +
2055  #else  /* !CONFIG_ARM_LPAE */
2056  
2057  /*
2058 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2059  #define pmd_alloc_one(mm,addr)         ({ BUG(); ((pmd_t *)2); })
2060  #define pmd_free(mm, pmd)              do { } while (0)
2061  #define pud_populate(mm,pmd,pte)       BUG()
2062 +#define pud_populate_kernel(mm,pmd,pte)        BUG()
2063  
2064  #endif /* CONFIG_ARM_LPAE */
2065  
2066 @@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2067         __free_page(pte);
2068  }
2069  
2070 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2071 +{
2072 +#ifdef CONFIG_ARM_LPAE
2073 +       pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2074 +#else
2075 +       if (addr & SECTION_SIZE)
2076 +               pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2077 +       else
2078 +               pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2079 +#endif
2080 +       flush_pmd_entry(pmdp);
2081 +}
2082 +
2083  static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2084                                   pmdval_t prot)
2085  {
2086 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2087 index 3f82e9d..2a85e8b 100644
2088 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2089 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2090 @@ -28,7 +28,7 @@
2091  /*
2092   *   - section
2093   */
2094 -#define PMD_SECT_PXN    (_AT(pmdval_t, 1) << 0)     /* v7 */
2095 +#define PMD_SECT_PXN           (_AT(pmdval_t, 1) << 0)     /* v7 */
2096  #define PMD_SECT_BUFFERABLE    (_AT(pmdval_t, 1) << 2)
2097  #define PMD_SECT_CACHEABLE     (_AT(pmdval_t, 1) << 3)
2098  #define PMD_SECT_XN            (_AT(pmdval_t, 1) << 4)         /* v6 */
2099 @@ -40,6 +40,7 @@
2100  #define PMD_SECT_nG            (_AT(pmdval_t, 1) << 17)        /* v6 */
2101  #define PMD_SECT_SUPER         (_AT(pmdval_t, 1) << 18)        /* v6 */
2102  #define PMD_SECT_AF            (_AT(pmdval_t, 0))
2103 +#define PMD_SECT_RDONLY                (_AT(pmdval_t, 0))
2104  
2105  #define PMD_SECT_UNCACHED      (_AT(pmdval_t, 0))
2106  #define PMD_SECT_BUFFERED      (PMD_SECT_BUFFERABLE)
2107 @@ -70,6 +71,7 @@
2108   *   - extended small page/tiny page
2109   */
2110  #define PTE_EXT_XN             (_AT(pteval_t, 1) << 0)         /* v6 */
2111 +#define PTE_EXT_PXN            (_AT(pteval_t, 1) << 2)         /* v7 */
2112  #define PTE_EXT_AP_MASK                (_AT(pteval_t, 3) << 4)
2113  #define PTE_EXT_AP0            (_AT(pteval_t, 1) << 4)
2114  #define PTE_EXT_AP1            (_AT(pteval_t, 2) << 4)
2115 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2116 index 92fd2c8..061dae1 100644
2117 --- a/arch/arm/include/asm/pgtable-2level.h
2118 +++ b/arch/arm/include/asm/pgtable-2level.h
2119 @@ -127,6 +127,9 @@
2120  #define L_PTE_SHARED           (_AT(pteval_t, 1) << 10)        /* shared(v6), coherent(xsc3) */
2121  #define L_PTE_NONE             (_AT(pteval_t, 1) << 11)
2122  
2123 +/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2124 +#define L_PTE_PXN              (_AT(pteval_t, 0))
2125 +
2126  /*
2127   * These are the memory types, defined to be compatible with
2128   * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
2129 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2130 index 2a029bc..a0524c7 100644
2131 --- a/arch/arm/include/asm/pgtable-3level.h
2132 +++ b/arch/arm/include/asm/pgtable-3level.h
2133 @@ -80,6 +80,7 @@
2134  #define L_PTE_USER             (_AT(pteval_t, 1) << 6)         /* AP[1] */
2135  #define L_PTE_SHARED           (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
2136  #define L_PTE_YOUNG            (_AT(pteval_t, 1) << 10)        /* AF */
2137 +#define L_PTE_PXN              (_AT(pteval_t, 1) << 53)        /* PXN */
2138  #define L_PTE_XN               (_AT(pteval_t, 1) << 54)        /* XN */
2139  #define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)
2140  #define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)
2141 @@ -90,10 +91,12 @@
2142  #define L_PMD_SECT_DIRTY       (_AT(pmdval_t, 1) << 55)
2143  #define L_PMD_SECT_NONE                (_AT(pmdval_t, 1) << 57)
2144  #define L_PMD_SECT_RDONLY      (_AT(pteval_t, 1) << 58)
2145 +#define PMD_SECT_RDONLY                PMD_SECT_AP2
2146  
2147  /*
2148   * To be used in assembly code with the upper page attributes.
2149   */
2150 +#define L_PTE_PXN_HIGH         (1 << (53 - 32))
2151  #define L_PTE_XN_HIGH          (1 << (54 - 32))
2152  #define L_PTE_DIRTY_HIGH       (1 << (55 - 32))
2153  
2154 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2155 index a8d656d..2febb8a 100644
2156 --- a/arch/arm/include/asm/pgtable.h
2157 +++ b/arch/arm/include/asm/pgtable.h
2158 @@ -33,6 +33,9 @@
2159  #include <asm/pgtable-2level.h>
2160  #endif
2161  
2162 +#define ktla_ktva(addr)                (addr)
2163 +#define ktva_ktla(addr)                (addr)
2164 +
2165  /*
2166   * Just any arbitrary offset to the start of the vmalloc VM area: the
2167   * current 8MB value just means that there will be a 8MB "hole" after the
2168 @@ -48,6 +51,9 @@
2169  #define LIBRARY_TEXT_START     0x0c000000
2170  
2171  #ifndef __ASSEMBLY__
2172 +extern pteval_t __supported_pte_mask;
2173 +extern pmdval_t __supported_pmd_mask;
2174 +
2175  extern void __pte_error(const char *file, int line, pte_t);
2176  extern void __pmd_error(const char *file, int line, pmd_t);
2177  extern void __pgd_error(const char *file, int line, pgd_t);
2178 @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2179  #define pmd_ERROR(pmd)         __pmd_error(__FILE__, __LINE__, pmd)
2180  #define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd)
2181  
2182 +#define  __HAVE_ARCH_PAX_OPEN_KERNEL
2183 +#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
2184 +
2185 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2186 +#include <asm/domain.h>
2187 +#include <linux/thread_info.h>
2188 +#include <linux/preempt.h>
2189 +
2190 +static inline int test_domain(int domain, int domaintype)
2191 +{
2192 +       return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2193 +}
2194 +#endif
2195 +
2196 +#ifdef CONFIG_PAX_KERNEXEC
2197 +static inline unsigned long pax_open_kernel(void) {
2198 +#ifdef CONFIG_ARM_LPAE
2199 +       /* TODO */
2200 +#else
2201 +       preempt_disable();
2202 +       BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2203 +       modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2204 +#endif
2205 +       return 0;
2206 +}
2207 +
2208 +static inline unsigned long pax_close_kernel(void) {
2209 +#ifdef CONFIG_ARM_LPAE
2210 +       /* TODO */
2211 +#else
2212 +       BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2213 +       /* DOMAIN_MANAGER = "client" under KERNEXEC */
2214 +       modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2215 +       preempt_enable_no_resched();
2216 +#endif
2217 +       return 0;
2218 +}
2219 +#else
2220 +static inline unsigned long pax_open_kernel(void) { return 0; }
2221 +static inline unsigned long pax_close_kernel(void) { return 0; }
2222 +#endif
2223 +
2224  /*
2225   * This is the lowest virtual address we can permit any user space
2226   * mapping to be mapped at.  This is particularly important for
2227 @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2228  /*
2229   * The pgprot_* and protection_map entries will be fixed up in runtime
2230   * to include the cachable and bufferable bits based on memory policy,
2231 - * as well as any architecture dependent bits like global/ASID and SMP
2232 - * shared mapping bits.
2233 + * as well as any architecture dependent bits like global/ASID, PXN,
2234 + * and SMP shared mapping bits.
2235   */
2236  #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2237  
2238 @@ -308,7 +356,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2239  static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2240  {
2241         const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2242 -               L_PTE_NONE | L_PTE_VALID;
2243 +               L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2244         pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2245         return pte;
2246  }
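
/*
 * Illustrative sketch, not part of the patch: the pgtable.h hunk above gives
 * other hunks in this patch (fncpy.h, tls.h, ...) a tiny write window onto
 * otherwise read-only kernel mappings by flipping DOMAIN_KERNEL to the
 * KERNEXEC/manager type with preemption disabled, then back to client.  The
 * calling pattern it enables looks like the stand-alone mock-up below; the
 * mprotect() calls merely stand in for the real DACR manipulation.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *ro_page;                   /* stands in for a read-only mapping */

static void mock_open_kernel(void)      /* cf. pax_open_kernel() */
{
        mprotect(ro_page, getpagesize(), PROT_READ | PROT_WRITE);
}

static void mock_close_kernel(void)     /* cf. pax_close_kernel() */
{
        mprotect(ro_page, getpagesize(), PROT_READ);
}

int main(void)
{
        ro_page = mmap(NULL, getpagesize(), PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        mock_open_kernel();             /* open the write window ...        */
        memcpy(ro_page, "patched", 8);  /* ... perform the privileged write */
        mock_close_kernel();            /* ... and close it again           */

        printf("%s\n", (char *)ro_page);
        return 0;
}
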
2247 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2248 index 3d6dc8b..1262ad3 100644
2249 --- a/arch/arm/include/asm/smp.h
2250 +++ b/arch/arm/include/asm/smp.h
2251 @@ -108,7 +108,7 @@ struct smp_operations {
2252         int  (*cpu_disable)(unsigned int cpu);
2253  #endif
2254  #endif
2255 -};
2256 +} __no_const;
2257  
2258  struct of_cpu_method {
2259         const char *method;
2260 diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
2261 index cf4f3aa..8f2f2d9 100644
2262 --- a/arch/arm/include/asm/string.h
2263 +++ b/arch/arm/include/asm/string.h
2264 @@ -7,19 +7,19 @@
2265   */
2266  
2267  #define __HAVE_ARCH_STRRCHR
2268 -extern char * strrchr(const char * s, int c);
2269 +extern char * strrchr(const char * s, int c) __nocapture(-1);
2270  
2271  #define __HAVE_ARCH_STRCHR
2272 -extern char * strchr(const char * s, int c);
2273 +extern char * strchr(const char * s, int c) __nocapture(-1);
2274  
2275  #define __HAVE_ARCH_MEMCPY
2276 -extern void * memcpy(void *, const void *, __kernel_size_t);
2277 +extern void * memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
2278  
2279  #define __HAVE_ARCH_MEMMOVE
2280 -extern void * memmove(void *, const void *, __kernel_size_t);
2281 +extern void * memmove(void *, const void *, __kernel_size_t) __nocapture(2);
2282  
2283  #define __HAVE_ARCH_MEMCHR
2284 -extern void * memchr(const void *, int, __kernel_size_t);
2285 +extern void * memchr(const void *, int, __kernel_size_t) __nocapture(-1);
2286  
2287  #define __HAVE_ARCH_MEMSET
2288  extern void * memset(void *, int, __kernel_size_t);
2289 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2290 index 776757d..a552c1d 100644
2291 --- a/arch/arm/include/asm/thread_info.h
2292 +++ b/arch/arm/include/asm/thread_info.h
2293 @@ -73,6 +73,9 @@ struct thread_info {
2294         .flags          = 0,                                            \
2295         .preempt_count  = INIT_PREEMPT_COUNT,                           \
2296         .addr_limit     = KERNEL_DS,                                    \
2297 +       .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |  \
2298 +                         domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2299 +                         domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),   \
2300  }
2301  
2302  #define init_thread_info       (init_thread_union.thread_info)
2303 @@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2304  #define TIF_SYSCALL_AUDIT      5       /* syscall auditing active */
2305  #define TIF_SYSCALL_TRACEPOINT 6       /* syscall tracepoint instrumentation */
2306  #define TIF_SECCOMP            7       /* seccomp syscall filtering active */
2307 +/* within 8 bits of TIF_SYSCALL_TRACE
2308 + *  to meet flexible second operand requirements
2309 + */
2310 +#define TIF_GRSEC_SETXID       8
2311  
2312  #define TIF_NOHZ               12      /* in adaptive nohz mode */
2313  #define TIF_USING_IWMMXT       17
2314 @@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2315  #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
2316  #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
2317  #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
2318 +#define _TIF_GRSEC_SETXID      (1 << TIF_GRSEC_SETXID)
2319  
2320  /* Checks for any syscall work in entry-common.S */
2321  #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2322 -                          _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2323 +                          _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2324  
2325  /*
2326   * Change these and you break ASM code in entry-common.S
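
/*
 * Illustrative sketch, not part of the patch: the thread_info.h hunk places
 * TIF_GRSEC_SETXID at bit 8 so that _TIF_SYSCALL_WORK still fits an ARM
 * "flexible second operand" - an 8-bit value rotated right by an even
 * amount - and can stay a single tst/bic immediate in entry-common.S.
 * Assuming TIF_SYSCALL_TRACE is bit 4 as in the mainline header, a small
 * checker for that encodability rule:
 */
#include <stdio.h>
#include <stdint.h>

/* returns 1 if v can be encoded as an ARM-mode modified immediate */
static int arm_immediate_ok(uint32_t v)
{
        for (unsigned int rot = 0; rot < 32; rot += 2) {
                uint32_t r = rot ? (v << rot) | (v >> (32 - rot)) : v;

                if (r <= 0xff)
                        return 1;
        }
        return 0;
}

int main(void)
{
        uint32_t work_mask = 0x1f0;     /* bits 4-8: TRACE..GRSEC_SETXID      */
        uint32_t far_mask  = 0x10f0;    /* same, but the new flag out at bit 12 */

        printf("mask 0x%04x encodable: %d\n", work_mask, arm_immediate_ok(work_mask));
        printf("mask 0x%04x encodable: %d\n", far_mask, arm_immediate_ok(far_mask));
        return 0;
}
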
2327 diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
2328 index f6fcc67..5895d62 100644
2329 --- a/arch/arm/include/asm/timex.h
2330 +++ b/arch/arm/include/asm/timex.h
2331 @@ -13,6 +13,7 @@
2332  #define _ASMARM_TIMEX_H
2333  
2334  typedef unsigned long cycles_t;
2335 +extern int read_current_timer(unsigned long *timer_val);
2336  #define get_cycles()   ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
2337  
2338  #endif
2339 diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2340 index 5f833f7..76e6644 100644
2341 --- a/arch/arm/include/asm/tls.h
2342 +++ b/arch/arm/include/asm/tls.h
2343 @@ -3,6 +3,7 @@
2344  
2345  #include <linux/compiler.h>
2346  #include <asm/thread_info.h>
2347 +#include <asm/pgtable.h>
2348  
2349  #ifdef __ASSEMBLY__
2350  #include <asm/asm-offsets.h>
2351 @@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2352                          * at 0xffff0fe0 must be used instead.  (see
2353                          * entry-armv.S for details)
2354                          */
2355 +                       pax_open_kernel();
2356                         *((unsigned int *)0xffff0ff0) = val;
2357 +                       pax_close_kernel();
2358  #endif
2359                 }
2360  
2361 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2362 index 1f59ea05..81245f0 100644
2363 --- a/arch/arm/include/asm/uaccess.h
2364 +++ b/arch/arm/include/asm/uaccess.h
2365 @@ -18,6 +18,7 @@
2366  #include <asm/domain.h>
2367  #include <asm/unified.h>
2368  #include <asm/compiler.h>
2369 +#include <asm/pgtable.h>
2370  
2371  #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2372  #include <asm-generic/uaccess-unaligned.h>
2373 @@ -50,6 +51,59 @@ struct exception_table_entry
2374  extern int fixup_exception(struct pt_regs *regs);
2375  
2376  /*
2377 + * These two are intentionally not defined anywhere - if the kernel
2378 + * code generates any references to them, that's a bug.
2379 + */
2380 +extern int __get_user_bad(void);
2381 +extern int __put_user_bad(void);
2382 +
2383 +/*
2384 + * Note that this is actually 0x1,0000,0000
2385 + */
2386 +#define KERNEL_DS      0x00000000
2387 +#define get_ds()       (KERNEL_DS)
2388 +
2389 +#ifdef CONFIG_MMU
2390 +
2391 +#define USER_DS                TASK_SIZE
2392 +#define get_fs()       (current_thread_info()->addr_limit)
2393 +
2394 +static inline void set_fs(mm_segment_t fs)
2395 +{
2396 +       current_thread_info()->addr_limit = fs;
2397 +       modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2398 +}
2399 +
2400 +#define segment_eq(a, b)       ((a) == (b))
2401 +
2402 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
2403 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2404 +
2405 +static inline void pax_open_userland(void)
2406 +{
2407 +
2408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2409 +       if (segment_eq(get_fs(), USER_DS)) {
2410 +               BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2411 +               modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2412 +       }
2413 +#endif
2414 +
2415 +}
2416 +
2417 +static inline void pax_close_userland(void)
2418 +{
2419 +
2420 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2421 +       if (segment_eq(get_fs(), USER_DS)) {
2422 +               BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2423 +               modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2424 +       }
2425 +#endif
2426 +
2427 +}
2428 +
2429 +/*
2430   * These two functions allow hooking accesses to userspace to increase
2431   * system integrity by ensuring that the kernel can not inadvertantly
2432   * perform such accesses (eg, via list poison values) which could then
2433 @@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
2434  
2435         return old_domain;
2436  #else
2437 +       pax_open_userland();
2438         return 0;
2439  #endif
2440  }
2441 @@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
2442  #ifdef CONFIG_CPU_SW_DOMAIN_PAN
2443         /* Restore the user access mask */
2444         set_domain(flags);
2445 +#else
2446 +       pax_close_userland();
2447  #endif
2448  }
2449  
2450 -/*
2451 - * These two are intentionally not defined anywhere - if the kernel
2452 - * code generates any references to them, that's a bug.
2453 - */
2454 -extern int __get_user_bad(void);
2455 -extern int __put_user_bad(void);
2456 -
2457 -/*
2458 - * Note that this is actually 0x1,0000,0000
2459 - */
2460 -#define KERNEL_DS      0x00000000
2461 -#define get_ds()       (KERNEL_DS)
2462 -
2463 -#ifdef CONFIG_MMU
2464 -
2465 -#define USER_DS                TASK_SIZE
2466 -#define get_fs()       (current_thread_info()->addr_limit)
2467 -
2468 -static inline void set_fs(mm_segment_t fs)
2469 -{
2470 -       current_thread_info()->addr_limit = fs;
2471 -       modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2472 -}
2473 -
2474 -#define segment_eq(a, b)       ((a) == (b))
2475 -
2476  /* We use 33-bit arithmetic here... */
2477  #define __range_ok(addr, size) ({ \
2478         unsigned long flag, roksum; \
2479 @@ -268,6 +299,7 @@ static inline void set_fs(mm_segment_t fs)
2480  
2481  #endif /* CONFIG_MMU */
2482  
2483 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2484  #define access_ok(type, addr, size)    (__range_ok(addr, size) == 0)
2485  
2486  #define user_addr_max() \
2487 @@ -474,10 +506,10 @@ do {                                                                      \
2488  
2489  
2490  #ifdef CONFIG_MMU
2491 -extern unsigned long __must_check
2492 +extern unsigned long __must_check __size_overflow(3)
2493  arm_copy_from_user(void *to, const void __user *from, unsigned long n);
2494  
2495 -static inline unsigned long __must_check
2496 +static inline unsigned long __must_check __size_overflow(3)
2497  __copy_from_user(void *to, const void __user *from, unsigned long n)
2498  {
2499         unsigned int __ua_flags;
2500 @@ -489,9 +521,9 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
2501         return n;
2502  }
2503  
2504 -extern unsigned long __must_check
2505 +extern unsigned long __must_check __size_overflow(3)
2506  arm_copy_to_user(void __user *to, const void *from, unsigned long n);
2507 -extern unsigned long __must_check
2508 +extern unsigned long __must_check __size_overflow(3)
2509  __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2510  
2511  static inline unsigned long __must_check
2512 @@ -511,9 +543,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
2513  #endif
2514  }
2515  
2516 -extern unsigned long __must_check
2517 +extern unsigned long __must_check __size_overflow(2)
2518  arm_clear_user(void __user *addr, unsigned long n);
2519 -extern unsigned long __must_check
2520 +extern unsigned long __must_check __size_overflow(2)
2521  __clear_user_std(void __user *addr, unsigned long n);
2522  
2523  static inline unsigned long __must_check
2524 @@ -534,6 +566,10 @@ __clear_user(void __user *addr, unsigned long n)
2525  static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2526  {
2527         unsigned long res = n;
2528 +
2529 +       if ((long)n < 0)
2530 +               return n;
2531 +
2532         if (likely(access_ok(VERIFY_READ, from, n)))
2533                 res = __copy_from_user(to, from, n);
2534         if (unlikely(res))
2535 @@ -543,6 +579,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2536  
2537  static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2538  {
2539 +       if ((long)n < 0)
2540 +               return n;
2541 +
2542         if (access_ok(VERIFY_WRITE, to, n))
2543                 n = __copy_to_user(to, from, n);
2544         return n;
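
/*
 * Illustrative sketch, not part of the patch: the uaccess.h hunk above makes
 * copy_{from,to}_user() bail out when the byte count, viewed as a signed
 * long, is negative.  That cheaply rejects "sizes" produced by a negative
 * int/long being converted to the unsigned count, which would otherwise turn
 * into a multi-gigabyte copy.  A user-space mock-up of the guard:
 */
#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
        if ((long)n < 0)                /* top bit set: implausible size */
                return n;               /* report everything as uncopied */
        /* ... access_ok() and the real copy would go here ... */
        return 0;                       /* pretend the copy succeeded */
}

int main(void)
{
        int broken_len = -4;            /* e.g. an error code reused as a length */
        unsigned long n = (unsigned long)broken_len;

        printf("request of %lu bytes, uncopied: %lu\n", n, guarded_copy(n));
        printf("request of %lu bytes, uncopied: %lu\n", 64UL, guarded_copy(64));
        return 0;
}
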
2545 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2546 index 5af0ed1..cea83883 100644
2547 --- a/arch/arm/include/uapi/asm/ptrace.h
2548 +++ b/arch/arm/include/uapi/asm/ptrace.h
2549 @@ -92,7 +92,7 @@
2550   * ARMv7 groups of PSR bits
2551   */
2552  #define APSR_MASK      0xf80f0000      /* N, Z, C, V, Q and GE flags */
2553 -#define PSR_ISET_MASK  0x01000010      /* ISA state (J, T) mask */
2554 +#define PSR_ISET_MASK  0x01000020      /* ISA state (J, T) mask */
2555  #define PSR_IT_MASK    0x0600fc00      /* If-Then execution state mask */
2556  #define PSR_ENDIAN_MASK        0x00000200      /* Endianness state mask */
2557  
2558 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2559 index 7e45f69..2c047db 100644
2560 --- a/arch/arm/kernel/armksyms.c
2561 +++ b/arch/arm/kernel/armksyms.c
2562 @@ -59,7 +59,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2563  
2564         /* networking */
2565  EXPORT_SYMBOL(csum_partial);
2566 -EXPORT_SYMBOL(csum_partial_copy_from_user);
2567 +EXPORT_SYMBOL(__csum_partial_copy_from_user);
2568  EXPORT_SYMBOL(csum_partial_copy_nocheck);
2569  EXPORT_SYMBOL(__csum_ipv6_magic);
2570  
2571 diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
2572 index 9f43ba0..1cee475 100644
2573 --- a/arch/arm/kernel/efi.c
2574 +++ b/arch/arm/kernel/efi.c
2575 @@ -60,9 +60,9 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
2576          * preference.
2577          */
2578         if (md->attribute & EFI_MEMORY_WB)
2579 -               desc.type = MT_MEMORY_RWX;
2580 +               desc.type = __MT_MEMORY_RWX;
2581         else if (md->attribute & EFI_MEMORY_WT)
2582 -               desc.type = MT_MEMORY_RWX_NONCACHED;
2583 +               desc.type = MT_MEMORY_RW_NONCACHED;
2584         else if (md->attribute & EFI_MEMORY_WC)
2585                 desc.type = MT_DEVICE_WC;
2586         else
2587 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2588 index 9f157e7..8e3f857 100644
2589 --- a/arch/arm/kernel/entry-armv.S
2590 +++ b/arch/arm/kernel/entry-armv.S
2591 @@ -50,6 +50,87 @@
2592  9997:
2593         .endm
2594  
2595 +       .macro  pax_enter_kernel
2596 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 +       @ make aligned space for saved DACR
2598 +       sub     sp, sp, #8
2599 +       @ save regs
2600 +       stmdb   sp!, {r1, r2}
2601 +       @ read DACR from cpu_domain into r1
2602 +       mov     r2, sp
2603 +       @ assume 8K pages, since we have to split the immediate in two
2604 +       bic     r2, r2, #(0x1fc0)
2605 +       bic     r2, r2, #(0x3f)
2606 +       ldr     r1, [r2, #TI_CPU_DOMAIN]
2607 +       @ store old DACR on stack
2608 +       str     r1, [sp, #8]
2609 +#ifdef CONFIG_PAX_KERNEXEC
2610 +       @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2611 +       bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2612 +       orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2613 +#endif
2614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2615 +       @ set current DOMAIN_USER to DOMAIN_NOACCESS
2616 +       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
2617 +#endif
2618 +       @ write r1 to current_thread_info()->cpu_domain
2619 +       str     r1, [r2, #TI_CPU_DOMAIN]
2620 +       @ write r1 to DACR
2621 +       mcr     p15, 0, r1, c3, c0, 0
2622 +       @ instruction sync
2623 +       instr_sync
2624 +       @ restore regs
2625 +       ldmia   sp!, {r1, r2}
2626 +#endif
2627 +       .endm
2628 +
2629 +       .macro  pax_open_userland
2630 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2631 +       @ save regs
2632 +       stmdb   sp!, {r0, r1}
2633 +       @ read DACR from cpu_domain into r1
2634 +       mov     r0, sp
2635 +       @ assume 8K pages, since we have to split the immediate in two
2636 +       bic     r0, r0, #(0x1fc0)
2637 +       bic     r0, r0, #(0x3f)
2638 +       ldr     r1, [r0, #TI_CPU_DOMAIN]
2639 +       @ set current DOMAIN_USER to DOMAIN_CLIENT
2640 +       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
2641 +       orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2642 +       @ write r1 to current_thread_info()->cpu_domain
2643 +       str     r1, [r0, #TI_CPU_DOMAIN]
2644 +       @ write r1 to DACR
2645 +       mcr     p15, 0, r1, c3, c0, 0
2646 +       @ instruction sync
2647 +       instr_sync
2648 +       @ restore regs
2649 +       ldmia   sp!, {r0, r1}
2650 +#endif
2651 +       .endm
2652 +
2653 +       .macro  pax_close_userland
2654 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2655 +       @ save regs
2656 +       stmdb   sp!, {r0, r1}
2657 +       @ read DACR from cpu_domain into r1
2658 +       mov     r0, sp
2659 +       @ assume 8K pages, since we have to split the immediate in two
2660 +       bic     r0, r0, #(0x1fc0)
2661 +       bic     r0, r0, #(0x3f)
2662 +       ldr     r1, [r0, #TI_CPU_DOMAIN]
2663 +       @ set current DOMAIN_USER to DOMAIN_NOACCESS
2664 +       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
2665 +       @ write r1 to current_thread_info()->cpu_domain
2666 +       str     r1, [r0, #TI_CPU_DOMAIN]
2667 +       @ write r1 to DACR
2668 +       mcr     p15, 0, r1, c3, c0, 0
2669 +       @ instruction sync
2670 +       instr_sync
2671 +       @ restore regs
2672 +       ldmia   sp!, {r0, r1}
2673 +#endif
2674 +       .endm
2675 +
2676         .macro  pabt_helper
2677         @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2678  #ifdef MULTI_PABORT
2679 @@ -92,11 +173,15 @@
2680   * Invalid mode handlers
2681   */
2682         .macro  inv_entry, reason
2683 +
2684 +       pax_enter_kernel
2685 +
2686         sub     sp, sp, #PT_REGS_SIZE
2687   ARM(  stmib   sp, {r1 - lr}           )
2688   THUMB(        stmia   sp, {r0 - r12}          )
2689   THUMB(        str     sp, [sp, #S_SP]         )
2690   THUMB(        str     lr, [sp, #S_LR]         )
2691 +
2692         mov     r1, #\reason
2693         .endm
2694  
2695 @@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
2696         .macro  svc_entry, stack_hole=0, trace=1, uaccess=1
2697   UNWIND(.fnstart               )
2698   UNWIND(.save {r0 - pc}                )
2699 +
2700 +       pax_enter_kernel
2701 +
2702         sub     sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
2703  #ifdef CONFIG_THUMB2_KERNEL
2704   SPFIX(        str     r0, [sp]        )       @ temporarily saved
2705 @@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
2706         ldmia   r0, {r3 - r5}
2707         add     r7, sp, #S_SP - 4       @ here for interlock avoidance
2708         mov     r6, #-1                 @  ""  ""      ""       ""
2709 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710 +       @ offset sp by 8 as done in pax_enter_kernel
2711 +       add     r2, sp, #(SVC_REGS_SIZE + \stack_hole + 4)
2712 +#else
2713         add     r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
2714 +#endif
2715   SPFIX(        addeq   r2, r2, #4      )
2716         str     r3, [sp, #-4]!          @ save the "real" r0 copied
2717                                         @ from the exception stack
2718 @@ -382,6 +475,9 @@ ENDPROC(__fiq_abt)
2719         .macro  usr_entry, trace=1, uaccess=1
2720   UNWIND(.fnstart       )
2721   UNWIND(.cantunwind    )       @ don't unwind the user space
2722 +
2723 +       pax_enter_kernel_user
2724 +
2725         sub     sp, sp, #PT_REGS_SIZE
2726   ARM(  stmib   sp, {r1 - r12}  )
2727   THUMB(        stmia   sp, {r0 - r12}  )
2728 @@ -495,7 +591,9 @@ __und_usr:
2729         tst     r3, #PSR_T_BIT                  @ Thumb mode?
2730         bne     __und_usr_thumb
2731         sub     r4, r2, #4                      @ ARM instr at LR - 4
2732 +       pax_open_userland
2733  1:     ldrt    r0, [r4]
2734 +       pax_close_userland
2735   ARM_BE8(rev   r0, r0)                         @ little endian instruction
2736  
2737         uaccess_disable ip
2738 @@ -531,11 +629,15 @@ __und_usr_thumb:
2739   */
2740         .arch   armv6t2
2741  #endif
2742 +       pax_open_userland
2743  2:     ldrht   r5, [r4]
2744 +       pax_close_userland
2745  ARM_BE8(rev16  r5, r5)                         @ little endian instruction
2746         cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
2747         blo     __und_usr_fault_16_pan          @ 16bit undefined instruction
2748 +       pax_open_userland
2749  3:     ldrht   r0, [r2]
2750 +       pax_close_userland
2751  ARM_BE8(rev16  r0, r0)                         @ little endian instruction
2752         uaccess_disable ip
2753         add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
2754 @@ -566,7 +668,8 @@ ENDPROC(__und_usr)
2755   */
2756         .pushsection .text.fixup, "ax"
2757         .align  2
2758 -4:     str     r4, [sp, #S_PC]                 @ retry current instruction
2759 +4:     pax_close_userland
2760 +       str     r4, [sp, #S_PC]                 @ retry current instruction
2761         ret     r9
2762         .popsection
2763         .pushsection __ex_table,"a"
2764 @@ -788,7 +891,7 @@ ENTRY(__switch_to)
2765   THUMB(        str     lr, [ip], #4               )
2766         ldr     r4, [r2, #TI_TP_VALUE]
2767         ldr     r5, [r2, #TI_TP_VALUE + 4]
2768 -#ifdef CONFIG_CPU_USE_DOMAINS
2769 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2770         mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
2771         str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
2772         ldr     r6, [r2, #TI_CPU_DOMAIN]
2773 @@ -799,7 +902,7 @@ ENTRY(__switch_to)
2774         ldr     r8, =__stack_chk_guard
2775         ldr     r7, [r7, #TSK_STACK_CANARY]
2776  #endif
2777 -#ifdef CONFIG_CPU_USE_DOMAINS
2778 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2779         mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
2780  #endif
2781         mov     r5, r0
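
The pax_close_userland macro above, together with its pax_open_userland counterpart, brackets the deliberate userland loads later in this file (the ldrt/ldrht fetches in __und_usr and __und_usr_thumb), so that under UDEREF the user domain is only accessible for the duration of the fetch. A rough C-level sketch of the close side, not part of the patch, using the existing domain helpers; the macro open-codes this because it runs on the exception-entry path and cannot call C:

#include <asm/domain.h>		/* domain_val(), set_domain()        */
#include <asm/thread_info.h>	/* current_thread_info(), cpu_domain */

static inline void pax_close_userland_sketch(void)
{
#ifdef CONFIG_PAX_MEMORY_UDEREF
	unsigned int dacr = current_thread_info()->cpu_domain;

	/* clear the 2-bit DOMAIN_USER field -> DOMAIN_NOACCESS (0b00) */
	dacr &= ~domain_val(DOMAIN_USER, 3);
	current_thread_info()->cpu_domain = dacr;
	set_domain(dacr);	/* mcr p15, 0, dacr, c3, c0, 0 */
#endif
}

The __switch_to hunk then widens the existing CONFIG_CPU_USE_DOMAINS save/restore of the per-thread domain register so it also runs under KERNEXEC or UDEREF, since both now keep a live per-thread DACR value in thread_info.
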
2782 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2783 index 10c3283..c47cdf5 100644
2784 --- a/arch/arm/kernel/entry-common.S
2785 +++ b/arch/arm/kernel/entry-common.S
2786 @@ -11,18 +11,46 @@
2787  #include <asm/assembler.h>
2788  #include <asm/unistd.h>
2789  #include <asm/ftrace.h>
2790 +#include <asm/domain.h>
2791  #include <asm/unwind.h>
2792  
2793 +#include "entry-header.S"
2794 +
2795  #ifdef CONFIG_NEED_RET_TO_USER
2796  #include <mach/entry-macro.S>
2797  #else
2798         .macro  arch_ret_to_user, tmp1, tmp2
2799 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2800 +       @ save regs
2801 +       stmdb   sp!, {r1, r2}
2802 +       @ read DACR from cpu_domain into r1
2803 +       mov     r2, sp
2804 +       @ assume 8K pages, since we have to split the immediate in two
2805 +       bic     r2, r2, #(0x1fc0)
2806 +       bic     r2, r2, #(0x3f)
2807 +       ldr     r1, [r2, #TI_CPU_DOMAIN]
2808 +#ifdef CONFIG_PAX_KERNEXEC
2809 +       @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2810 +       bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2811 +       orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2812 +#endif
2813 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2814 +       @ set current DOMAIN_USER to DOMAIN_UDEREF
2815 +       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
2816 +       orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2817 +#endif
2818 +       @ write r1 to current_thread_info()->cpu_domain
2819 +       str     r1, [r2, #TI_CPU_DOMAIN]
2820 +       @ write r1 to DACR
2821 +       mcr     p15, 0, r1, c3, c0, 0
2822 +       @ instruction sync
2823 +       instr_sync
2824 +       @ restore regs
2825 +       ldmia   sp!, {r1, r2}
2826 +#endif
2827         .endm
2828  #endif
2829  
2830 -#include "entry-header.S"
2831 -
2832 -
2833         .align  5
2834  #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
2835  /*
2836 @@ -36,7 +64,9 @@ ret_fast_syscall:
2837   UNWIND(.cantunwind    )
2838         disable_irq_notrace                     @ disable interrupts
2839         ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
2840 -       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
2841 +       tst     r1, #_TIF_SYSCALL_WORK
2842 +       bne     fast_work_pending
2843 +       tst     r1, #_TIF_WORK_MASK
2844         bne     fast_work_pending
2845  
2846         /* perform architecture specific actions before user return */
2847 @@ -62,7 +92,9 @@ ret_fast_syscall:
2848         str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
2849         disable_irq_notrace                     @ disable interrupts
2850         ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
2851 -       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
2852 +       tst     r1, #_TIF_SYSCALL_WORK
2853 +       bne     __sys_trace_return_nosave
2854 +       tst     r1, #_TIF_WORK_MASK
2855         beq     no_work_pending
2856   UNWIND(.fnend         )
2857  ENDPROC(ret_fast_syscall)
2858 @@ -199,6 +231,12 @@ ENTRY(vector_swi)
2859  
2860         uaccess_disable tbl
2861  
2862 +       /*
2863 +        * do this here to avoid a performance hit of wrapping the code above
2864 +        * that directly dereferences userland to parse the SWI instruction
2865 +        */
2866 +       pax_enter_kernel_user
2867 +
2868         adr     tbl, sys_call_table             @ load syscall table pointer
2869  
2870  #if defined(CONFIG_OABI_COMPAT)
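
The arch_ret_to_user addition undoes, on the way back to userspace, what pax_enter_kernel_user does on entry: DOMAIN_KERNEL is pinned to the patch-introduced DOMAIN_KERNELCLIENT type and DOMAIN_USER is re-granted as DOMAIN_UDEREF. Like every macro in this patch that touches the cached DACR, it finds thread_info by masking the stack pointer: the two bic instructions compute sp & ~(THREAD_SIZE - 1) under the assumption of an 8K kernel stack, and the 0x1fc0/0x3f split exists only because 0x1fff cannot be encoded as a single ARM data-processing immediate (an 8-bit value rotated by an even amount). A small sketch of the same lookup, with an illustrative name:

#include <asm/thread_info.h>

/* Illustrative only: what "bic rX, sp, #0x1fc0 ; bic rX, rX, #0x3f"
 * computes in the macros above (0x1fc0 | 0x3f == 0x1fff == THREAD_SIZE - 1).
 */
static inline struct thread_info *ti_from_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
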
2871 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2872 index 6391728..6bf90b8 100644
2873 --- a/arch/arm/kernel/entry-header.S
2874 +++ b/arch/arm/kernel/entry-header.S
2875 @@ -196,6 +196,59 @@
2876         msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
2877         .endm
2878  
2879 +       .macro  pax_enter_kernel_user
2880 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2881 +       @ save regs
2882 +       stmdb   sp!, {r0, r1}
2883 +       @ read DACR from cpu_domain into r1
2884 +       mov     r0, sp
2885 +       @ assume 8K pages, since we have to split the immediate in two
2886 +       bic     r0, r0, #(0x1fc0)
2887 +       bic     r0, r0, #(0x3f)
2888 +       ldr     r1, [r0, #TI_CPU_DOMAIN]
2889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2890 +       @ set current DOMAIN_USER to DOMAIN_NOACCESS
2891 +       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
2892 +#endif
2893 +#ifdef CONFIG_PAX_KERNEXEC
2894 +       @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2895 +       bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2896 +       orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2897 +#endif
2898 +       @ write r1 to current_thread_info()->cpu_domain
2899 +       str     r1, [r0, #TI_CPU_DOMAIN]
2900 +       @ write r1 to DACR
2901 +       mcr     p15, 0, r1, c3, c0, 0
2902 +       @ instruction sync
2903 +       instr_sync
2904 +       @ restore regs
2905 +       ldmia   sp!, {r0, r1}
2906 +#endif
2907 +       .endm
2908 +
2909 +       .macro  pax_exit_kernel
2910 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2911 +       @ save regs
2912 +       stmdb   sp!, {r0, r1}
2913 +       @ read old DACR from stack into r1
2914 +       ldr     r1, [sp, #(8 + S_SP)]
2915 +       sub     r1, r1, #8
2916 +       ldr     r1, [r1]
2917 +
2918 +       @ write r1 to current_thread_info()->cpu_domain
2919 +       mov     r0, sp
2920 +       @ assume 8K pages, since we have to split the immediate in two
2921 +       bic     r0, r0, #(0x1fc0)
2922 +       bic     r0, r0, #(0x3f)
2923 +       str     r1, [r0, #TI_CPU_DOMAIN]
2924 +       @ write r1 to DACR
2925 +       mcr     p15, 0, r1, c3, c0, 0
2926 +       @ instruction sync
2927 +       instr_sync
2928 +       @ restore regs
2929 +       ldmia   sp!, {r0, r1}
2930 +#endif
2931 +       .endm
2932  
2933         .macro  svc_exit, rpsr, irq = 0
2934         .if     \irq != 0
2935 @@ -219,6 +272,8 @@
2936         uaccess_restore
2937         str     r1, [tsk, #TI_ADDR_LIMIT]
2938  
2939 +       pax_exit_kernel
2940 +
2941  #ifndef CONFIG_THUMB2_KERNEL
2942         @ ARM mode SVC restore
2943         msr     spsr_cxsf, \rpsr
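
pax_enter_kernel_user and pax_exit_kernel follow one pattern: load the cached DACR, rewrite one or two 2-bit domain fields, and write the result back to both current_thread_info()->cpu_domain and the hardware register, followed by an instruction sync. domain_val() packs an access type into a domain's field (type << (2 * dom)), so each bic/orr pair above is just a field update, roughly as sketched below. DOMAIN_UDEREF and DOMAIN_KERNELCLIENT are access types introduced elsewhere in this patch; DOMAIN_NOACCESS (0), DOMAIN_CLIENT (1) and DOMAIN_MANAGER (3) are the standard encodings.

#include <asm/domain.h>

/* Illustrative helper, not part of the patch. */
static inline unsigned int dacr_set_field(unsigned int dacr,
					  unsigned int dom, unsigned int type)
{
	dacr &= ~domain_val(dom, 3);	/* bic: clear the 2-bit field   */
	dacr |= domain_val(dom, type);	/* orr: install the access type */
	return dacr;
}

/* e.g. the KERNEXEC branch of pax_enter_kernel_user amounts to:
 *	dacr = dacr_set_field(dacr, DOMAIN_KERNEL, DOMAIN_KERNELCLIENT);
 */

pax_exit_kernel differs only in where the value comes from: instead of recomputing it, it restores the DACR that was stashed below the exception frame on kernel entry, which is the same 8-byte offset the svc_entry hunk compensates for.
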
2944 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2945 index 059c3da..8e45cfc 100644
2946 --- a/arch/arm/kernel/fiq.c
2947 +++ b/arch/arm/kernel/fiq.c
2948 @@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2949         void *base = vectors_page;
2950         unsigned offset = FIQ_OFFSET;
2951  
2952 +       pax_open_kernel();
2953         memcpy(base + offset, start, length);
2954 +       pax_close_kernel();
2955 +
2956         if (!cache_is_vipt_nonaliasing())
2957                 flush_icache_range((unsigned long)base + offset, offset +
2958                                    length);
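
In set_fiq_handler() the new handler is copied into the vectors page, a write that KERNEXEC would otherwise fault on, so the patch wraps the memcpy() in pax_open_kernel()/pax_close_kernel(); these helpers are defined elsewhere in this patch and compile away when KERNEXEC is off. The bracket pattern, sketched as a standalone helper with an illustrative name:

#include <linux/string.h>

/* Illustrative name; only the open/write/close bracket is the point. */
static void write_protected_text(void *dst, const void *src, size_t len)
{
	pax_open_kernel();	/* temporarily allow the write */
	memcpy(dst, src, len);
	pax_close_kernel();	/* restore the protection */
}
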
2959 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2960 index 4f14b5c..91ff261 100644
2961 --- a/arch/arm/kernel/module.c
2962 +++ b/arch/arm/kernel/module.c
2963 @@ -38,17 +38,47 @@
2964  #endif
2965  
2966  #ifdef CONFIG_MMU
2967 -void *module_alloc(unsigned long size)
2968 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2969  {
2970 -       void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2971 -                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2972 +       void *p;
2973 +
2974 +       if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
2975 +               return NULL;
2976 +
2977 +       p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2978 +                               GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2979                                 __builtin_return_address(0));
2980         if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
2981                 return p;
2982         return __vmalloc_node_range(size, 1,  VMALLOC_START, VMALLOC_END,
2983 -                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2984 +                               GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2985                                 __builtin_return_address(0));
2986  }
2987 +
2988 +void *module_alloc(unsigned long size)
2989 +{
2990 +
2991 +#ifdef CONFIG_PAX_KERNEXEC
2992 +       return __module_alloc(size, PAGE_KERNEL);
2993 +#else
2994 +       return __module_alloc(size, PAGE_KERNEL_EXEC);
2995 +#endif
2996 +
2997 +}
2998 +
2999 +#ifdef CONFIG_PAX_KERNEXEC
3000 +void module_memfree_exec(void *module_region)
3001 +{
3002 +       module_memfree(module_region);
3003 +}
3004 +EXPORT_SYMBOL(module_memfree_exec);
3005 +
3006 +void *module_alloc_exec(unsigned long size)
3007 +{
3008 +       return __module_alloc(size, PAGE_KERNEL_EXEC);
3009 +}
3010 +EXPORT_SYMBOL(module_alloc_exec);
3011 +#endif
3012  #endif
3013  
3014  int