1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 9de9813..1462492 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
44 @@ -72,9 +78,11 @@ Image
56 @@ -83,6 +91,7 @@ aic7*seq.h*
64 @@ -95,32 +104,40 @@ bounds.h
92 +devicetable-offsets.h
101 +exception_policy.conf
105 @@ -128,12 +145,15 @@ fore200e_pca_fw.c*
121 @@ -148,14 +168,14 @@ int32.c
138 @@ -165,14 +185,15 @@ mach-types.h
155 @@ -188,6 +209,8 @@ oui.c*
164 @@ -197,6 +220,7 @@ perf-archive
172 @@ -206,7 +230,12 @@ r200_reg_safe.h
176 +randomize_layout_hash.h
177 +randomize_layout_seed.h
185 @@ -216,8 +245,12 @@ series
190 +size_overflow_hash.h
198 @@ -227,6 +260,7 @@ tftpboot.img
206 @@ -238,13 +272,17 @@ vdso32.lds
224 @@ -252,9 +290,12 @@ vsyscall_32.lds
237 diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238 index 13f888a..250729b 100644
239 --- a/Documentation/kbuild/makefiles.txt
240 +++ b/Documentation/kbuild/makefiles.txt
241 @@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245 - --- 4.3 Using C++ for host programs
246 - --- 4.4 Controlling compiler options for host programs
247 - --- 4.5 When host programs are actually built
248 - --- 4.6 Using hostprogs-$(CONFIG_FOO)
249 + --- 4.3 Defining shared libraries
250 + --- 4.4 Using C++ for host programs
251 + --- 4.5 Controlling compiler options for host programs
252 + --- 4.6 When host programs are actually built
253 + --- 4.7 Using hostprogs-$(CONFIG_FOO)
255 === 5 Kbuild clean infrastructure
257 @@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
261 ---- 4.3 Using C++ for host programs
262 +--- 4.3 Defining shared libraries
264 + Objects with extension .so are considered shared libraries, and
265 + will be compiled as position independent objects.
266 + Kbuild provides support for shared libraries, but the usage
267 + shall be restricted.
268 + In the following example the libkconfig.so shared library is used
269 + to link the executable conf.
272 + #scripts/kconfig/Makefile
273 + hostprogs-y := conf
274 + conf-objs := conf.o libkconfig.so
275 + libkconfig-objs := expr.o type.o
277 + Shared libraries always require a corresponding -objs line, and
278 + in the example above the shared library libkconfig is composed by
279 + the two objects expr.o and type.o.
280 + expr.o and type.o will be built as position independent code and
281 + linked as a shared library libkconfig.so. C++ is not supported for
284 +--- 4.4 Using C++ for host programs
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288 @@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
292 ---- 4.4 Controlling compiler options for host programs
293 +--- 4.5 Controlling compiler options for host programs
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297 @@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
301 ---- 4.5 When host programs are actually built
302 +--- 4.6 When host programs are actually built
304 Kbuild will only build host-programs when they are referenced
306 @@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
310 ---- 4.6 Using hostprogs-$(CONFIG_FOO)
311 +--- 4.7 Using hostprogs-$(CONFIG_FOO)
313 A typical pattern in a Kbuild file looks like this:
315 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316 index 1d6f045..2714987 100644
317 --- a/Documentation/kernel-parameters.txt
318 +++ b/Documentation/kernel-parameters.txt
319 @@ -1244,6 +1244,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
323 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324 + ignore grsecurity's /proc restrictions
326 + grsec_sysfs_restrict= Format: 0 | 1
328 + Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333 @@ -2364,6 +2371,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
338 + Disable PCID (Process-Context IDentifier) even if it
339 + is supported by the processor.
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344 @@ -2662,6 +2673,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
348 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349 + virtualization environments that don't cope well with the
350 + expand down segment used by UDEREF on X86-32 or the frequent
351 + page table updates on X86-64.
354 + Format: { 0 | 1 | off | fast | full }
355 + Options '0' and '1' are only provided for backward
356 + compatibility, 'off' or 'fast' should be used instead.
357 + 0|off : disable slab object sanitization
358 + 1|fast: enable slab object sanitization excluding
359 + whitelisted slabs (default)
360 + full : sanitize all slabs, even the whitelisted ones
362 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
364 + pax_extra_latent_entropy
365 + Enable a very simple form of latent entropy extraction
366 + from the first 4GB of memory as the bootmem allocator
367 + passes the memory pages to the buddy allocator.
369 + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370 + when the processor supports PCID.
375 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
376 index 6fccb69..60c7c7a 100644
377 --- a/Documentation/sysctl/kernel.txt
378 +++ b/Documentation/sysctl/kernel.txt
379 @@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
381 - kstack_depth_to_print [ X86 only ]
383 +- modify_ldt [ X86 only ]
384 - modprobe ==> Documentation/debugging-modules.txt
386 - msg_next_id [ sysv ipc ]
387 @@ -391,6 +392,20 @@ This flag controls the L2 cache of G3 processor boards. If
389 ==============================================================
391 +modify_ldt: (X86 only)
393 +Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
394 +(Local Descriptor Table) may be needed to run a 16-bit or segmented code
395 +such as Dosemu or Wine. This is done via a system call which is not needed
396 +to run portable applications, and which can sometimes be abused to exploit
397 +some weaknesses of the architecture, opening new vulnerabilities.
399 +This sysctl allows one to increase the system's security by disabling the
400 +system call, or to restore compatibility with specific applications when it
401 +was already disabled.
403 +==============================================================
407 A toggle value indicating if modules are allowed to be loaded
408 diff --git a/Makefile b/Makefile
409 index 9ef3739..20b7716 100644
412 @@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
415 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
417 +HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
418 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
419 +HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
421 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
422 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
423 @@ -434,8 +436,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
424 # Rules shared between *config targets and build targets
426 # Basic helpers built in scripts/
427 -PHONY += scripts_basic
429 +PHONY += scripts_basic gcc-plugins
430 +scripts_basic: gcc-plugins
431 $(Q)$(MAKE) $(build)=scripts/basic
432 $(Q)rm -f .tmp_quiet_recordmcount
434 @@ -615,6 +617,74 @@ endif
435 # Tell gcc to never replace conditional load with a non-conditional one
436 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
438 +ifndef DISABLE_PAX_PLUGINS
439 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
440 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
442 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
444 +ifneq ($(PLUGINCC),)
445 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
446 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
448 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
449 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
450 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
452 +ifdef CONFIG_KALLOCSTAT_PLUGIN
453 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
455 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
456 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
457 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
458 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
460 +ifdef CONFIG_GRKERNSEC_RANDSTRUCT
461 +RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
462 +ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
463 +RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
466 +ifdef CONFIG_CHECKER_PLUGIN
467 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
468 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
471 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
472 +ifdef CONFIG_PAX_SIZE_OVERFLOW
473 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
475 +ifdef CONFIG_PAX_LATENT_ENTROPY
476 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
478 +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
479 +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
481 +INITIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/initify_plugin.so -DINITIFY_PLUGIN
482 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
483 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
484 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
485 +GCC_PLUGINS_CFLAGS += $(INITIFY_PLUGIN_CFLAGS)
486 +GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
487 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
488 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
489 +ifeq ($(KBUILD_EXTMOD),)
491 + $(Q)$(MAKE) $(build)=tools/gcc
497 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
498 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
500 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
502 + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
506 ifdef CONFIG_READABLE_ASM
507 # Disable optimizations that make assembler listings hard to read.
508 # reorder blocks reorders the control in the function
509 @@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
513 -KBUILD_AFLAGS += -Wa,-gdwarf-2
514 +KBUILD_AFLAGS += -Wa,--gdwarf-2
516 ifdef CONFIG_DEBUG_INFO_DWARF4
517 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
518 @@ -886,7 +956,7 @@ export mod_sign_cmd
521 ifeq ($(KBUILD_EXTMOD),)
522 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
523 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
525 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
526 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
527 @@ -936,6 +1006,8 @@ endif
529 # The actual objects are generated when descending,
530 # make sure no implicit rule kicks in
531 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
532 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
533 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
535 # Handle descending into subdirectories listed in $(vmlinux-dirs)
536 @@ -945,7 +1017,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
537 # Error messages still appears in the original language
539 PHONY += $(vmlinux-dirs)
540 -$(vmlinux-dirs): prepare scripts
541 +$(vmlinux-dirs): gcc-plugins prepare scripts
542 $(Q)$(MAKE) $(build)=$@
544 define filechk_kernel.release
545 @@ -988,10 +1060,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
547 archprepare: archheaders archscripts prepare1 scripts_basic
549 +prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
550 +prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
551 prepare0: archprepare FORCE
552 $(Q)$(MAKE) $(build)=.
554 # All the preparing..
555 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
558 # Generate some files
559 @@ -1099,6 +1174,8 @@ all: modules
560 # using awk while concatenating to the final file.
563 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
564 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
565 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
566 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
567 @$(kecho) ' Building modules, stage 2.';
568 @@ -1114,7 +1191,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
570 # Target to prepare building external modules
571 PHONY += modules_prepare
572 -modules_prepare: prepare scripts
573 +modules_prepare: gcc-plugins prepare scripts
575 # Target to install modules
576 PHONY += modules_install
577 @@ -1180,7 +1257,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
578 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
579 signing_key.priv signing_key.x509 x509.genkey \
580 extra_certificates signing_key.x509.keyid \
581 - signing_key.x509.signer vmlinux-gdb.py
582 + signing_key.x509.signer vmlinux-gdb.py \
583 + tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
584 + tools/gcc/size_overflow_plugin/size_overflow_hash.h \
585 + tools/gcc/randomize_layout_seed.h
587 # clean - Delete most, but leave enough to build external modules
589 @@ -1219,7 +1299,7 @@ distclean: mrproper
590 @find $(srctree) $(RCS_FIND_IGNORE) \
591 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
592 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
593 - -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
594 + -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
595 -type f -print | xargs rm -f
598 @@ -1385,6 +1465,8 @@ PHONY += $(module-dirs) modules
599 $(module-dirs): crmodverdir $(objtree)/Module.symvers
600 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
602 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
603 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
604 modules: $(module-dirs)
605 @$(kecho) ' Building modules, stage 2.';
606 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
607 @@ -1525,17 +1607,21 @@ else
608 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
611 -%.s: %.c prepare scripts FORCE
612 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
613 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
614 +%.s: %.c gcc-plugins prepare scripts FORCE
615 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
616 %.i: %.c prepare scripts FORCE
617 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
618 -%.o: %.c prepare scripts FORCE
619 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
620 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
621 +%.o: %.c gcc-plugins prepare scripts FORCE
622 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
623 %.lst: %.c prepare scripts FORCE
624 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
625 -%.s: %.S prepare scripts FORCE
626 +%.s: %.S gcc-plugins prepare scripts FORCE
627 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
628 -%.o: %.S prepare scripts FORCE
629 +%.o: %.S gcc-plugins prepare scripts FORCE
630 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
631 %.symtypes: %.c prepare scripts FORCE
632 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
633 @@ -1547,11 +1633,15 @@ endif
634 $(build)=$(build-dir)
635 # Make sure the latest headers are built for Documentation
636 Documentation/: headers_install
637 -%/: prepare scripts FORCE
638 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
639 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
640 +%/: gcc-plugins prepare scripts FORCE
642 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
643 $(build)=$(build-dir)
644 -%.ko: prepare scripts FORCE
645 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
646 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
647 +%.ko: gcc-plugins prepare scripts FORCE
649 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
650 $(build)=$(build-dir) $(@:.ko=.o)
651 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
652 index 8f8eafb..3405f46 100644
653 --- a/arch/alpha/include/asm/atomic.h
654 +++ b/arch/alpha/include/asm/atomic.h
655 @@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
656 #define atomic_dec(v) atomic_sub(1,(v))
657 #define atomic64_dec(v) atomic64_sub(1,(v))
659 +#define atomic64_read_unchecked(v) atomic64_read(v)
660 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
661 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
662 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
663 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
664 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
665 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
666 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
667 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
669 #endif /* _ALPHA_ATOMIC_H */
670 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
671 index ad368a9..fbe0f25 100644
672 --- a/arch/alpha/include/asm/cache.h
673 +++ b/arch/alpha/include/asm/cache.h
675 #ifndef __ARCH_ALPHA_CACHE_H
676 #define __ARCH_ALPHA_CACHE_H
678 +#include <linux/const.h>
680 /* Bytes per L1 (data) cache line. */
681 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
682 -# define L1_CACHE_BYTES 64
683 # define L1_CACHE_SHIFT 6
685 /* Both EV4 and EV5 are write-through, read-allocate,
686 direct-mapped, physical.
688 -# define L1_CACHE_BYTES 32
689 # define L1_CACHE_SHIFT 5
692 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
693 #define SMP_CACHE_BYTES L1_CACHE_BYTES
696 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
697 index 968d999..d36b2df 100644
698 --- a/arch/alpha/include/asm/elf.h
699 +++ b/arch/alpha/include/asm/elf.h
700 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
702 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
704 +#ifdef CONFIG_PAX_ASLR
705 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
707 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
708 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
711 /* $0 is set by ld.so to a pointer to a function which might be
712 registered using atexit. This provides a mean for the dynamic
713 linker to call DT_FINI functions for shared libraries that have
714 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
715 index aab14a0..b4fa3e7 100644
716 --- a/arch/alpha/include/asm/pgalloc.h
717 +++ b/arch/alpha/include/asm/pgalloc.h
718 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
723 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
725 + pgd_populate(mm, pgd, pmd);
728 extern pgd_t *pgd_alloc(struct mm_struct *mm);
731 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
732 index a9a1195..e9b8417 100644
733 --- a/arch/alpha/include/asm/pgtable.h
734 +++ b/arch/alpha/include/asm/pgtable.h
735 @@ -101,6 +101,17 @@ struct vm_area_struct;
736 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
737 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
738 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
740 +#ifdef CONFIG_PAX_PAGEEXEC
741 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
742 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
743 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
745 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
746 +# define PAGE_COPY_NOEXEC PAGE_COPY
747 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
750 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
752 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
753 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
754 index 2fd00b7..cfd5069 100644
755 --- a/arch/alpha/kernel/module.c
756 +++ b/arch/alpha/kernel/module.c
757 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
759 /* The small sections were sorted to the end of the segment.
760 The following should definitely cover them. */
761 - gp = (u64)me->module_core + me->core_size - 0x8000;
762 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
763 got = sechdrs[me->arch.gotsecindex].sh_addr;
765 for (i = 0; i < n; i++) {
766 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
767 index 36dc91a..6769cb0 100644
768 --- a/arch/alpha/kernel/osf_sys.c
769 +++ b/arch/alpha/kernel/osf_sys.c
770 @@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
771 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
774 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
775 - unsigned long limit)
776 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
777 + unsigned long limit, unsigned long flags)
779 struct vm_unmapped_area_info info;
780 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
784 @@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
785 info.high_limit = limit;
787 info.align_offset = 0;
788 + info.threadstack_offset = offset;
789 return vm_unmapped_area(&info);
792 @@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
793 merely specific addresses, but regions of memory -- perhaps
794 this feature should be incorporated into all ports? */
796 +#ifdef CONFIG_PAX_RANDMMAP
797 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
801 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
802 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
803 if (addr != (unsigned long) -ENOMEM)
807 /* Next, try allocating at TASK_UNMAPPED_BASE. */
808 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
810 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
812 if (addr != (unsigned long) -ENOMEM)
815 /* Finally, try allocating in low memory. */
816 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
817 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
821 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
822 index 4a905bd..0a4da53 100644
823 --- a/arch/alpha/mm/fault.c
824 +++ b/arch/alpha/mm/fault.c
825 @@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
826 __reload_thread(pcb);
829 +#ifdef CONFIG_PAX_PAGEEXEC
831 + * PaX: decide what to do with offenders (regs->pc = fault address)
833 + * returns 1 when task should be killed
834 + * 2 when patched PLT trampoline was detected
835 + * 3 when unpatched PLT trampoline was detected
837 +static int pax_handle_fetch_fault(struct pt_regs *regs)
840 +#ifdef CONFIG_PAX_EMUPLT
843 + do { /* PaX: patched PLT emulation #1 */
844 + unsigned int ldah, ldq, jmp;
846 + err = get_user(ldah, (unsigned int *)regs->pc);
847 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
848 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
853 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
854 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
855 + jmp == 0x6BFB0000U)
857 + unsigned long r27, addr;
858 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
859 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
861 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
862 + err = get_user(r27, (unsigned long *)addr);
872 + do { /* PaX: patched PLT emulation #2 */
873 + unsigned int ldah, lda, br;
875 + err = get_user(ldah, (unsigned int *)regs->pc);
876 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
877 + err |= get_user(br, (unsigned int *)(regs->pc+8));
882 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
883 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
884 + (br & 0xFFE00000U) == 0xC3E00000U)
886 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
887 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
888 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
890 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
891 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
896 + do { /* PaX: unpatched PLT emulation */
899 + err = get_user(br, (unsigned int *)regs->pc);
901 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
902 + unsigned int br2, ldq, nop, jmp;
903 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
905 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
906 + err = get_user(br2, (unsigned int *)addr);
907 + err |= get_user(ldq, (unsigned int *)(addr+4));
908 + err |= get_user(nop, (unsigned int *)(addr+8));
909 + err |= get_user(jmp, (unsigned int *)(addr+12));
910 + err |= get_user(resolver, (unsigned long *)(addr+16));
915 + if (br2 == 0xC3600000U &&
916 + ldq == 0xA77B000CU &&
917 + nop == 0x47FF041FU &&
918 + jmp == 0x6B7B0000U)
920 + regs->r28 = regs->pc+4;
921 + regs->r27 = addr+16;
922 + regs->pc = resolver;
932 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
936 + printk(KERN_ERR "PAX: bytes at PC: ");
937 + for (i = 0; i < 5; i++) {
939 + if (get_user(c, (unsigned int *)pc+i))
940 + printk(KERN_CONT "???????? ");
942 + printk(KERN_CONT "%08x ", c);
949 * This routine handles page faults. It determines the address,
950 @@ -132,8 +250,29 @@ retry:
952 si_code = SEGV_ACCERR;
954 - if (!(vma->vm_flags & VM_EXEC))
955 + if (!(vma->vm_flags & VM_EXEC)) {
957 +#ifdef CONFIG_PAX_PAGEEXEC
958 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
961 + up_read(&mm->mmap_sem);
962 + switch (pax_handle_fetch_fault(regs)) {
964 +#ifdef CONFIG_PAX_EMUPLT
971 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
972 + do_group_exit(SIGKILL);
979 /* Allow reads even for write-only mappings */
980 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
981 diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
982 index bd4670d..920c97a 100644
983 --- a/arch/arc/Kconfig
984 +++ b/arch/arc/Kconfig
985 @@ -485,6 +485,7 @@ config ARC_DBG_TLB_MISS_COUNT
986 bool "Profile TLB Misses"
989 + depends on !GRKERNSEC_KMEM
991 Counts number of I and D TLB Misses and exports them via Debugfs
992 The counters can be cleared via Debugfs as well
993 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
994 index ede2526..9e12300 100644
995 --- a/arch/arm/Kconfig
996 +++ b/arch/arm/Kconfig
997 @@ -1770,7 +1770,7 @@ config ALIGNMENT_TRAP
999 config UACCESS_WITH_MEMCPY
1000 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
1002 + depends on MMU && !PAX_MEMORY_UDEREF
1003 default y if CPU_FEROCEON
1005 Implement faster copy_to_user and clear_user methods for CPU
1006 @@ -2006,6 +2006,7 @@ config KEXEC
1007 bool "Kexec system call (EXPERIMENTAL)"
1008 depends on (!SMP || PM_SLEEP_SMP)
1010 + depends on !GRKERNSEC_KMEM
1012 kexec is a system call that implements the ability to shutdown your
1013 current kernel, and to start another kernel. It is like a reboot
1014 diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
1015 index a2e16f9..b26e911 100644
1016 --- a/arch/arm/Kconfig.debug
1017 +++ b/arch/arm/Kconfig.debug
1018 @@ -7,6 +7,7 @@ config ARM_PTDUMP
1019 depends on DEBUG_KERNEL
1022 + depends on !GRKERNSEC_KMEM
1024 Say Y here if you want to show the kernel pagetable layout in a
1025 debugfs file. This information is only useful for kernel developers
1026 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
1027 index e22c119..abe7041 100644
1028 --- a/arch/arm/include/asm/atomic.h
1029 +++ b/arch/arm/include/asm/atomic.h
1031 #include <asm/barrier.h>
1032 #include <asm/cmpxchg.h>
1034 +#ifdef CONFIG_GENERIC_ATOMIC64
1035 +#include <asm-generic/atomic64.h>
1038 #define ATOMIC_INIT(i) { (i) }
1042 +#ifdef CONFIG_THUMB2_KERNEL
1043 +#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
1045 +#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1048 +#define _ASM_EXTABLE(from, to) \
1049 +" .pushsection __ex_table,\"a\"\n"\
1051 +" .long " #from ", " #to"\n" \
1055 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1056 * strex/ldrex monitor on some implementations. The reason we can use it for
1057 * atomic_set() is the clrex or dummy strex done on every exception return.
1059 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1060 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1062 + return ACCESS_ONCE(v->counter);
1064 #define atomic_set(v,i) (((v)->counter) = (i))
1065 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1070 #if __LINUX_ARM_ARCH__ >= 6
1073 * to ensure that the update happens.
1076 -#define ATOMIC_OP(op, c_op, asm_op) \
1077 -static inline void atomic_##op(int i, atomic_t *v) \
1078 +#ifdef CONFIG_PAX_REFCOUNT
1079 +#define __OVERFLOW_POST \
1081 + "2: " REFCOUNT_TRAP_INSN "\n"\
1083 +#define __OVERFLOW_POST_RETURN \
1086 + "2: " REFCOUNT_TRAP_INSN "\n"\
1088 +#define __OVERFLOW_EXTABLE \
1090 + _ASM_EXTABLE(2b, 4b)
1092 +#define __OVERFLOW_POST
1093 +#define __OVERFLOW_POST_RETURN
1094 +#define __OVERFLOW_EXTABLE
1097 +#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1098 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1100 unsigned long tmp; \
1103 prefetchw(&v->counter); \
1104 - __asm__ __volatile__("@ atomic_" #op "\n" \
1105 + __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1106 "1: ldrex %0, [%3]\n" \
1107 " " #asm_op " %0, %0, %4\n" \
1109 " strex %1, %0, [%3]\n" \
1114 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1115 : "r" (&v->counter), "Ir" (i) \
1119 -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1120 -static inline int atomic_##op##_return(int i, atomic_t *v) \
1121 +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1122 + __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1124 +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1125 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1127 unsigned long tmp; \
1129 @@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1131 prefetchw(&v->counter); \
1133 - __asm__ __volatile__("@ atomic_" #op "_return\n" \
1134 + __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1135 "1: ldrex %0, [%3]\n" \
1136 " " #asm_op " %0, %0, %4\n" \
1138 " strex %1, %0, [%3]\n" \
1143 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1144 : "r" (&v->counter), "Ir" (i) \
1146 @@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1150 +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1151 + __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1153 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1156 @@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1157 __asm__ __volatile__ ("@ atomic_add_unless\n"
1158 "1: ldrex %0, [%4]\n"
1161 -" add %1, %0, %6\n"
1163 +" adds %1, %0, %6\n"
1165 +#ifdef CONFIG_PAX_REFCOUNT
1167 +"2: " REFCOUNT_TRAP_INSN "\n"
1171 " strex %2, %1, [%4]\n"
1177 +#ifdef CONFIG_PAX_REFCOUNT
1178 + _ASM_EXTABLE(2b, 4b)
1181 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1182 : "r" (&v->counter), "r" (u), "r" (a)
1184 @@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1188 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1190 + unsigned long oldval, res;
1195 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1196 + "ldrex %1, [%3]\n"
1199 + "strexeq %0, %5, [%3]\n"
1200 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1201 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1210 #else /* ARM_ARCH_6 */
1213 #error SMP not supported on pre-ARMv6 CPUs
1216 -#define ATOMIC_OP(op, c_op, asm_op) \
1217 -static inline void atomic_##op(int i, atomic_t *v) \
1218 +#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1219 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1221 unsigned long flags; \
1223 @@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1224 raw_local_irq_restore(flags); \
1227 -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1228 -static inline int atomic_##op##_return(int i, atomic_t *v) \
1229 +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1230 + __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1232 +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1233 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1235 unsigned long flags; \
1237 @@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1241 +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1242 + __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1244 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1247 @@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1251 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1253 + return atomic_cmpxchg((atomic_t *)v, old, new);
1256 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1259 @@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1262 #undef ATOMIC_OP_RETURN
1263 +#undef __ATOMIC_OP_RETURN
1267 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1268 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1270 + return xchg(&v->counter, new);
1273 #define atomic_inc(v) atomic_add(1, v)
1274 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1276 + atomic_add_unchecked(1, v);
1278 #define atomic_dec(v) atomic_sub(1, v)
1279 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1281 + atomic_sub_unchecked(1, v);
1284 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1285 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1287 + return atomic_add_return_unchecked(1, v) == 0;
1289 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1290 #define atomic_inc_return(v) (atomic_add_return(1, v))
1291 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1293 + return atomic_add_return_unchecked(1, v);
1295 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1296 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1298 @@ -216,6 +336,14 @@ typedef struct {
1302 +#ifdef CONFIG_PAX_REFCOUNT
1304 + long long counter;
1305 +} atomic64_unchecked_t;
1307 +typedef atomic64_t atomic64_unchecked_t;
1310 #define ATOMIC64_INIT(i) { (i) }
1312 #ifdef CONFIG_ARM_LPAE
1313 @@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1317 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1321 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1322 +" ldrd %0, %H0, [%1]"
1324 + : "r" (&v->counter), "Qo" (v->counter)
1330 static inline void atomic64_set(atomic64_t *v, long long i)
1332 __asm__ __volatile__("@ atomic64_set\n"
1333 @@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1334 : "r" (&v->counter), "r" (i)
1338 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1340 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1341 +" strd %2, %H2, [%1]"
1342 + : "=Qo" (v->counter)
1343 + : "r" (&v->counter), "r" (i)
1347 static inline long long atomic64_read(const atomic64_t *v)
1349 @@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1353 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1357 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1358 +" ldrexd %0, %H0, [%1]"
1360 + : "r" (&v->counter), "Qo" (v->counter)
1366 static inline void atomic64_set(atomic64_t *v, long long i)
1369 @@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1370 : "r" (&v->counter), "r" (i)
1374 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1378 + prefetchw(&v->counter);
1379 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1380 +"1: ldrexd %0, %H0, [%2]\n"
1381 +" strexd %0, %3, %H3, [%2]\n"
1384 + : "=&r" (tmp), "=Qo" (v->counter)
1385 + : "r" (&v->counter), "r" (i)
1390 -#define ATOMIC64_OP(op, op1, op2) \
1391 -static inline void atomic64_##op(long long i, atomic64_t *v) \
1392 +#undef __OVERFLOW_POST_RETURN
1393 +#define __OVERFLOW_POST_RETURN \
1396 +" mov %H0, %H1\n" \
1397 + "2: " REFCOUNT_TRAP_INSN "\n"\
1400 +#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1401 +static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1404 unsigned long tmp; \
1406 prefetchw(&v->counter); \
1407 - __asm__ __volatile__("@ atomic64_" #op "\n" \
1408 + __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1409 "1: ldrexd %0, %H0, [%3]\n" \
1410 " " #op1 " %Q0, %Q0, %Q4\n" \
1411 " " #op2 " %R0, %R0, %R4\n" \
1413 " strexd %1, %0, %H0, [%3]\n" \
1418 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1419 : "r" (&v->counter), "r" (i) \
1423 -#define ATOMIC64_OP_RETURN(op, op1, op2) \
1424 -static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1425 +#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1426 + __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1428 +#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1429 +static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1432 unsigned long tmp; \
1433 @@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1435 prefetchw(&v->counter); \
1437 - __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1438 + __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1439 "1: ldrexd %0, %H0, [%3]\n" \
1440 " " #op1 " %Q0, %Q0, %Q4\n" \
1441 " " #op2 " %R0, %R0, %R4\n" \
1443 " strexd %1, %0, %H0, [%3]\n" \
1448 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1449 : "r" (&v->counter), "r" (i) \
1451 @@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1455 +#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1456 + __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1458 #define ATOMIC64_OPS(op, op1, op2) \
1459 ATOMIC64_OP(op, op1, op2) \
1460 ATOMIC64_OP_RETURN(op, op1, op2)
1461 @@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1464 #undef ATOMIC64_OP_RETURN
1465 +#undef __ATOMIC64_OP_RETURN
1467 +#undef __ATOMIC64_OP
1468 +#undef __OVERFLOW_EXTABLE
1469 +#undef __OVERFLOW_POST_RETURN
1470 +#undef __OVERFLOW_POST
1472 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1474 @@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1478 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1482 + unsigned long res;
1487 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1488 + "ldrexd %1, %H1, [%3]\n"
1491 + "teqeq %H1, %H4\n"
1492 + "strexdeq %0, %5, %H5, [%3]"
1493 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1494 + : "r" (&ptr->counter), "r" (old), "r" (new)
1503 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1506 @@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1507 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1510 - unsigned long tmp;
1514 prefetchw(&v->counter);
1516 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1517 -"1: ldrexd %0, %H0, [%3]\n"
1518 -" subs %Q0, %Q0, #1\n"
1519 -" sbc %R0, %R0, #0\n"
1520 +"1: ldrexd %1, %H1, [%3]\n"
1521 +" subs %Q0, %Q1, #1\n"
1522 +" sbcs %R0, %R1, #0\n"
1524 +#ifdef CONFIG_PAX_REFCOUNT
1528 +"2: " REFCOUNT_TRAP_INSN "\n"
1535 " strexd %1, %0, %H0, [%3]\n"
1541 +#ifdef CONFIG_PAX_REFCOUNT
1542 + _ASM_EXTABLE(2b, 4b)
1545 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1548 @@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1554 " adds %Q0, %Q0, %Q6\n"
1555 -" adc %R0, %R0, %R6\n"
1556 +" adcs %R0, %R0, %R6\n"
1558 +#ifdef CONFIG_PAX_REFCOUNT
1560 +"2: " REFCOUNT_TRAP_INSN "\n"
1564 " strexd %2, %0, %H0, [%4]\n"
1570 +#ifdef CONFIG_PAX_REFCOUNT
1571 + _ASM_EXTABLE(2b, 4b)
1574 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1575 : "r" (&v->counter), "r" (u), "r" (a)
1577 @@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1579 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1580 #define atomic64_inc(v) atomic64_add(1LL, (v))
1581 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1582 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1583 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1584 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1585 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1586 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1587 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1588 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1589 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1590 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1591 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1592 index 6c2327e..85beac4 100644
1593 --- a/arch/arm/include/asm/barrier.h
1594 +++ b/arch/arm/include/asm/barrier.h
1597 compiletime_assert_atomic_type(*p); \
1599 - ACCESS_ONCE(*p) = (v); \
1600 + ACCESS_ONCE_RW(*p) = (v); \
1603 #define smp_load_acquire(p) \
1604 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1605 index 75fe66b..ba3dee4 100644
1606 --- a/arch/arm/include/asm/cache.h
1607 +++ b/arch/arm/include/asm/cache.h
1609 #ifndef __ASMARM_CACHE_H
1610 #define __ASMARM_CACHE_H
1612 +#include <linux/const.h>
1614 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1615 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1616 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1619 * Memory returned by kmalloc() may be used for DMA, so we must make
1623 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1624 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1627 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1628 index 4812cda..9da8116 100644
1629 --- a/arch/arm/include/asm/cacheflush.h
1630 +++ b/arch/arm/include/asm/cacheflush.h
1631 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1632 void (*dma_unmap_area)(const void *, size_t, int);
1634 void (*dma_flush_range)(const void *, const void *);
1639 * Select the calling method
1640 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1641 index 5233151..87a71fa 100644
1642 --- a/arch/arm/include/asm/checksum.h
1643 +++ b/arch/arm/include/asm/checksum.h
1644 @@ -37,7 +37,19 @@ __wsum
1645 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1648 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1649 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1651 +static inline __wsum
1652 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1655 + pax_open_userland();
1656 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1657 + pax_close_userland();
1664 * Fold a partial checksum without adding pseudo headers
1665 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1666 index 1692a05..1835802 100644
1667 --- a/arch/arm/include/asm/cmpxchg.h
1668 +++ b/arch/arm/include/asm/cmpxchg.h
1669 @@ -107,6 +107,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1670 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
1673 +#define xchg_unchecked(ptr, x) ({ \
1674 + (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
1675 + sizeof(*(ptr))); \
1678 #include <asm-generic/cmpxchg-local.h>
1680 diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
1681 index 0f84249..8e83c55 100644
1682 --- a/arch/arm/include/asm/cpuidle.h
1683 +++ b/arch/arm/include/asm/cpuidle.h
1684 @@ -32,7 +32,7 @@ struct device_node;
1685 struct cpuidle_ops {
1686 int (*suspend)(int cpu, unsigned long arg);
1687 int (*init)(struct device_node *, int cpu);
1691 struct of_cpuidle_method {
1693 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1694 index 6ddbe44..b5e38b1a 100644
1695 --- a/arch/arm/include/asm/domain.h
1696 +++ b/arch/arm/include/asm/domain.h
1700 #define DOMAIN_NOACCESS 0
1701 -#define DOMAIN_CLIENT 1
1702 #ifdef CONFIG_CPU_USE_DOMAINS
1703 +#define DOMAIN_USERCLIENT 1
1704 +#define DOMAIN_KERNELCLIENT 1
1705 #define DOMAIN_MANAGER 3
1706 +#define DOMAIN_VECTORS DOMAIN_USER
1709 +#ifdef CONFIG_PAX_KERNEXEC
1710 #define DOMAIN_MANAGER 1
1711 +#define DOMAIN_KERNEXEC 3
1713 +#define DOMAIN_MANAGER 1
1716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1717 +#define DOMAIN_USERCLIENT 0
1718 +#define DOMAIN_UDEREF 1
1719 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1721 +#define DOMAIN_USERCLIENT 1
1722 +#define DOMAIN_VECTORS DOMAIN_USER
1724 +#define DOMAIN_KERNELCLIENT 1
1728 #define domain_val(dom,type) ((type) << (2*(dom)))
1730 #ifndef __ASSEMBLY__
1732 -#ifdef CONFIG_CPU_USE_DOMAINS
1733 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1734 static inline void set_domain(unsigned val)
1737 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1741 -#define modify_domain(dom,type) \
1743 - struct thread_info *thread = current_thread_info(); \
1744 - unsigned int domain = thread->cpu_domain; \
1745 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1746 - thread->cpu_domain = domain | domain_val(dom, type); \
1747 - set_domain(thread->cpu_domain); \
1750 +extern void modify_domain(unsigned int dom, unsigned int type);
1752 static inline void set_domain(unsigned val) { }
1753 static inline void modify_domain(unsigned dom, unsigned type) { }
1754 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1755 index d2315ff..f60b47b 100644
1756 --- a/arch/arm/include/asm/elf.h
1757 +++ b/arch/arm/include/asm/elf.h
1758 @@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1759 the loader. We need to make sure that it is out of the way of the program
1760 that it will "exec", and that there is sufficient room for the brk. */
1762 -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1763 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1765 +#ifdef CONFIG_PAX_ASLR
1766 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1768 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1769 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1772 /* When the program starts, a1 contains a pointer to a function to be
1773 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1774 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1775 index de53547..52b9a28 100644
1776 --- a/arch/arm/include/asm/fncpy.h
1777 +++ b/arch/arm/include/asm/fncpy.h
1779 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1780 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1782 + pax_open_kernel(); \
1783 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1784 + pax_close_kernel(); \
1785 flush_icache_range((unsigned long)(dest_buf), \
1786 (unsigned long)(dest_buf) + (size)); \
1788 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1789 index 5eed828..365e018 100644
1790 --- a/arch/arm/include/asm/futex.h
1791 +++ b/arch/arm/include/asm/futex.h
1792 @@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1793 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1796 + pax_open_userland();
1799 /* Prefetching cannot fault */
1801 @@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1805 + pax_close_userland();
1810 @@ -94,6 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1814 + pax_open_userland();
1816 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1817 "1: " TUSER(ldr) " %1, [%4]\n"
1819 @@ -104,6 +110,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1820 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1823 + pax_close_userland();
1828 @@ -131,6 +139,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1831 pagefault_disable();
1832 + pax_open_userland();
1836 @@ -152,6 +161,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1840 + pax_close_userland();
1844 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1845 index 83eb2f7..ed77159 100644
1846 --- a/arch/arm/include/asm/kmap_types.h
1847 +++ b/arch/arm/include/asm/kmap_types.h
1850 * This is the "bare minimum". AIO seems to require this.
1852 -#define KM_TYPE_NR 16
1853 +#define KM_TYPE_NR 17
1856 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1857 index 9e614a1..3302cca 100644
1858 --- a/arch/arm/include/asm/mach/dma.h
1859 +++ b/arch/arm/include/asm/mach/dma.h
1860 @@ -22,7 +22,7 @@ struct dma_ops {
1861 int (*residue)(unsigned int, dma_t *); /* optional */
1862 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1868 void *addr; /* single DMA address */
1869 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1870 index f98c7f3..e5c626d 100644
1871 --- a/arch/arm/include/asm/mach/map.h
1872 +++ b/arch/arm/include/asm/mach/map.h
1873 @@ -23,17 +23,19 @@ struct map_desc {
1875 /* types 0-3 are defined in asm/io.h */
1880 + MT_UNCACHED_RW = 4,
1889 - MT_MEMORY_RWX_NONCACHED,
1892 + MT_MEMORY_RW_NONCACHED,
1893 + MT_MEMORY_RX_NONCACHED,
1895 - MT_MEMORY_RWX_ITCM,
1896 + MT_MEMORY_RX_ITCM,
1898 MT_MEMORY_DMA_READY,
1900 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1901 index 563b92f..689d58e 100644
1902 --- a/arch/arm/include/asm/outercache.h
1903 +++ b/arch/arm/include/asm/outercache.h
1904 @@ -39,7 +39,7 @@ struct outer_cache_fns {
1905 /* This is an ARM L2C thing */
1906 void (*write_sec)(unsigned long, unsigned);
1907 void (*configure)(const struct l2x0_regs *);
1911 extern struct outer_cache_fns outer_cache;
1913 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1914 index 4355f0e..cd9168e 100644
1915 --- a/arch/arm/include/asm/page.h
1916 +++ b/arch/arm/include/asm/page.h
1921 +#include <linux/compiler.h>
1922 #include <asm/glue.h>
1925 @@ -114,7 +115,7 @@ struct cpu_user_fns {
1926 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1927 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1928 unsigned long vaddr, struct vm_area_struct *vma);
1933 extern struct cpu_user_fns cpu_user;
1934 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1935 index 19cfab5..3f5c7e9 100644
1936 --- a/arch/arm/include/asm/pgalloc.h
1937 +++ b/arch/arm/include/asm/pgalloc.h
1939 #include <asm/processor.h>
1940 #include <asm/cacheflush.h>
1941 #include <asm/tlbflush.h>
1942 +#include <asm/system_info.h>
1944 #define check_pgt_cache() do { } while (0)
1946 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1947 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1950 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1952 + pud_populate(mm, pud, pmd);
1955 #else /* !CONFIG_ARM_LPAE */
1958 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1959 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1960 #define pmd_free(mm, pmd) do { } while (0)
1961 #define pud_populate(mm,pmd,pte) BUG()
1962 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1964 #endif /* CONFIG_ARM_LPAE */
1966 @@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1970 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1972 +#ifdef CONFIG_ARM_LPAE
1973 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1975 + if (addr & SECTION_SIZE)
1976 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1978 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1980 + flush_pmd_entry(pmdp);
1983 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1986 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1987 index 5e68278..1869bae 100644
1988 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1989 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1994 -#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1995 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1996 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1997 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1998 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2000 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2001 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2002 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2003 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2005 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2006 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2008 * - extended small page/tiny page
2010 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2011 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2012 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2013 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2014 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2015 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2016 index aeddd28..207745c 100644
2017 --- a/arch/arm/include/asm/pgtable-2level.h
2018 +++ b/arch/arm/include/asm/pgtable-2level.h
2020 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2021 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2023 +/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2024 +#define L_PTE_PXN (_AT(pteval_t, 0))
2027 * These are the memory types, defined to be compatible with
2028 * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
2029 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2030 index a745a2a..481350a 100644
2031 --- a/arch/arm/include/asm/pgtable-3level.h
2032 +++ b/arch/arm/include/asm/pgtable-3level.h
2034 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
2035 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2036 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2037 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2038 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2039 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2040 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2042 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2043 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2044 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2045 +#define PMD_SECT_RDONLY PMD_SECT_AP2
2048 * To be used in assembly code with the upper page attributes.
2050 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
2051 #define L_PTE_XN_HIGH (1 << (54 - 32))
2052 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2054 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2055 index f403541..b10df68 100644
2056 --- a/arch/arm/include/asm/pgtable.h
2057 +++ b/arch/arm/include/asm/pgtable.h
2059 #include <asm/pgtable-2level.h>
2062 +#define ktla_ktva(addr) (addr)
2063 +#define ktva_ktla(addr) (addr)
2066 * Just any arbitrary offset to the start of the vmalloc VM area: the
2067 * current 8MB value just means that there will be a 8MB "hole" after the
2069 #define LIBRARY_TEXT_START 0x0c000000
2071 #ifndef __ASSEMBLY__
2072 +extern pteval_t __supported_pte_mask;
2073 +extern pmdval_t __supported_pmd_mask;
2075 extern void __pte_error(const char *file, int line, pte_t);
2076 extern void __pmd_error(const char *file, int line, pmd_t);
2077 extern void __pgd_error(const char *file, int line, pgd_t);
2078 @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2079 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2080 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2082 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
2083 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2085 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2086 +#include <asm/domain.h>
2087 +#include <linux/thread_info.h>
2088 +#include <linux/preempt.h>
2090 +static inline int test_domain(int domain, int domaintype)
2092 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2096 +#ifdef CONFIG_PAX_KERNEXEC
2097 +static inline unsigned long pax_open_kernel(void) {
2098 +#ifdef CONFIG_ARM_LPAE
2101 + preempt_disable();
2102 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2103 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2108 +static inline unsigned long pax_close_kernel(void) {
2109 +#ifdef CONFIG_ARM_LPAE
2112 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2113 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
2114 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2115 + preempt_enable_no_resched();
2120 +static inline unsigned long pax_open_kernel(void) { return 0; }
2121 +static inline unsigned long pax_close_kernel(void) { return 0; }
2125 * This is the lowest virtual address we can permit any user space
2126 * mapping to be mapped at. This is particularly important for
2127 @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2129 * The pgprot_* and protection_map entries will be fixed up in runtime
2130 * to include the cachable and bufferable bits based on memory policy,
2131 - * as well as any architecture dependent bits like global/ASID and SMP
2132 - * shared mapping bits.
2133 + * as well as any architecture dependent bits like global/ASID, PXN,
2134 + * and SMP shared mapping bits.
2136 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2138 @@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2139 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2141 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2142 - L_PTE_NONE | L_PTE_VALID;
2143 + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2144 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2147 diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2148 index c25ef3e..735f14b 100644
2149 --- a/arch/arm/include/asm/psci.h
2150 +++ b/arch/arm/include/asm/psci.h
2151 @@ -32,7 +32,7 @@ struct psci_operations {
2152 int (*affinity_info)(unsigned long target_affinity,
2153 unsigned long lowest_affinity_level);
2154 int (*migrate_info_type)(void);
2158 extern struct psci_operations psci_ops;
2159 extern struct smp_operations psci_smp_ops;
2160 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2161 index 2f3ac1b..67182ae0 100644
2162 --- a/arch/arm/include/asm/smp.h
2163 +++ b/arch/arm/include/asm/smp.h
2164 @@ -108,7 +108,7 @@ struct smp_operations {
2165 int (*cpu_disable)(unsigned int cpu);
2171 struct of_cpu_method {
2173 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2174 index bd32ede..bd90a0b 100644
2175 --- a/arch/arm/include/asm/thread_info.h
2176 +++ b/arch/arm/include/asm/thread_info.h
2177 @@ -74,9 +74,9 @@ struct thread_info {
2179 .preempt_count = INIT_PREEMPT_COUNT, \
2180 .addr_limit = KERNEL_DS, \
2181 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2182 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2183 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2184 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2185 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2186 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2189 #define init_thread_info (init_thread_union.thread_info)
2190 @@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2191 #define TIF_SYSCALL_AUDIT 9
2192 #define TIF_SYSCALL_TRACEPOINT 10
2193 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2194 -#define TIF_NOHZ 12 /* in adaptive nohz mode */
2195 +/* within 8 bits of TIF_SYSCALL_TRACE
2196 + * to meet flexible second operand requirements
2198 +#define TIF_GRSEC_SETXID 12
2199 +#define TIF_NOHZ 13 /* in adaptive nohz mode */
2200 #define TIF_USING_IWMMXT 17
2201 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2202 #define TIF_RESTORE_SIGMASK 20
2203 @@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2204 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2205 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2206 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2207 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2209 /* Checks for any syscall work in entry-common.S */
2210 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2211 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2212 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2215 * Change these and you break ASM code in entry-common.S
2216 diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2217 index 5f833f7..76e6644 100644
2218 --- a/arch/arm/include/asm/tls.h
2219 +++ b/arch/arm/include/asm/tls.h
2222 #include <linux/compiler.h>
2223 #include <asm/thread_info.h>
2224 +#include <asm/pgtable.h>
2227 #include <asm/asm-offsets.h>
2228 @@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2229 * at 0xffff0fe0 must be used instead. (see
2230 * entry-armv.S for details)
2232 + pax_open_kernel();
2233 *((unsigned int *)0xffff0ff0) = val;
2234 + pax_close_kernel();
2238 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2239 index 74b17d0..7e6da4b 100644
2240 --- a/arch/arm/include/asm/uaccess.h
2241 +++ b/arch/arm/include/asm/uaccess.h
2243 #include <asm/domain.h>
2244 #include <asm/unified.h>
2245 #include <asm/compiler.h>
2246 +#include <asm/pgtable.h>
2248 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2249 #include <asm-generic/uaccess-unaligned.h>
2250 @@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2251 static inline void set_fs(mm_segment_t fs)
2253 current_thread_info()->addr_limit = fs;
2254 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2255 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2258 #define segment_eq(a, b) ((a) == (b))
2260 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
2261 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2263 +static inline void pax_open_userland(void)
2266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2267 + if (segment_eq(get_fs(), USER_DS)) {
2268 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2269 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2275 +static inline void pax_close_userland(void)
2278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2279 + if (segment_eq(get_fs(), USER_DS)) {
2280 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2281 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2287 #define __addr_ok(addr) ({ \
2288 unsigned long flag; \
2289 __asm__("cmp %2, %0; movlo %0, #0" \
2290 @@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2292 #define get_user(x, p) \
2296 - __get_user_check(x, p); \
2297 + pax_open_userland(); \
2298 + __e = __get_user_check((x), (p)); \
2299 + pax_close_userland(); \
2303 extern int __put_user_1(void *, unsigned int);
2304 @@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2306 #define put_user(x, p) \
2310 - __put_user_check(x, p); \
2311 + pax_open_userland(); \
2312 + __e = __put_user_check((x), (p)); \
2313 + pax_close_userland(); \
2317 #else /* CONFIG_MMU */
2318 @@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2320 #endif /* CONFIG_MMU */
2322 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2323 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2325 #define user_addr_max() \
2326 @@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2327 #define __get_user(x, ptr) \
2329 long __gu_err = 0; \
2330 + pax_open_userland(); \
2331 __get_user_err((x), (ptr), __gu_err); \
2332 + pax_close_userland(); \
2336 #define __get_user_error(x, ptr, err) \
2338 + pax_open_userland(); \
2339 __get_user_err((x), (ptr), err); \
2340 + pax_close_userland(); \
2344 @@ -368,13 +409,17 @@ do { \
2345 #define __put_user(x, ptr) \
2347 long __pu_err = 0; \
2348 + pax_open_userland(); \
2349 __put_user_err((x), (ptr), __pu_err); \
2350 + pax_close_userland(); \
2354 #define __put_user_error(x, ptr, err) \
2356 + pax_open_userland(); \
2357 __put_user_err((x), (ptr), err); \
2358 + pax_close_userland(); \
2362 @@ -474,11 +519,44 @@ do { \
2366 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2367 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2368 -extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2369 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2370 -extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2371 +extern unsigned long __must_check __size_overflow(3) ___copy_from_user(void *to, const void __user *from, unsigned long n);
2372 +extern unsigned long __must_check __size_overflow(3) ___copy_to_user(void __user *to, const void *from, unsigned long n);
2374 +static inline unsigned long __must_check __size_overflow(3) __copy_from_user(void *to, const void __user *from, unsigned long n)
2376 + unsigned long ret;
2378 + check_object_size(to, n, false);
2379 + pax_open_userland();
2380 + ret = ___copy_from_user(to, from, n);
2381 + pax_close_userland();
2385 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2387 + unsigned long ret;
2389 + check_object_size(from, n, true);
2390 + pax_open_userland();
2391 + ret = ___copy_to_user(to, from, n);
2392 + pax_close_userland();
2396 +extern unsigned long __must_check __size_overflow(3) __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2397 +extern unsigned long __must_check __size_overflow(2) ___clear_user(void __user *addr, unsigned long n);
2398 +extern unsigned long __must_check __size_overflow(2) __clear_user_std(void __user *addr, unsigned long n);
2400 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2402 + unsigned long ret;
2403 + pax_open_userland();
2404 + ret = ___clear_user(addr, n);
2405 + pax_close_userland();
2410 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2411 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2412 @@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2414 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2419 if (access_ok(VERIFY_READ, from, n))
2420 n = __copy_from_user(to, from, n);
2421 else /* security hole - plug it */
2422 @@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2424 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2429 if (access_ok(VERIFY_WRITE, to, n))
2430 n = __copy_to_user(to, from, n);
2432 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2433 index 5af0ed1..cea83883 100644
2434 --- a/arch/arm/include/uapi/asm/ptrace.h
2435 +++ b/arch/arm/include/uapi/asm/ptrace.h
2437 * ARMv7 groups of PSR bits
2439 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2440 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2441 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2442 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2443 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2445 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2446 index 5e5a51a..b21eeef 100644
2447 --- a/arch/arm/kernel/armksyms.c
2448 +++ b/arch/arm/kernel/armksyms.c
2449 @@ -58,7 +58,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2452 EXPORT_SYMBOL(csum_partial);
2453 -EXPORT_SYMBOL(csum_partial_copy_from_user);
2454 +EXPORT_SYMBOL(__csum_partial_copy_from_user);
2455 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2456 EXPORT_SYMBOL(__csum_ipv6_magic);
2458 @@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
2460 EXPORT_SYMBOL(copy_page);
2462 -EXPORT_SYMBOL(__copy_from_user);
2463 -EXPORT_SYMBOL(__copy_to_user);
2464 -EXPORT_SYMBOL(__clear_user);
2465 +EXPORT_SYMBOL(___copy_from_user);
2466 +EXPORT_SYMBOL(___copy_to_user);
2467 +EXPORT_SYMBOL(___clear_user);
2469 EXPORT_SYMBOL(__get_user_1);
2470 EXPORT_SYMBOL(__get_user_2);
2471 diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
2472 index 318da33..373689f 100644
2473 --- a/arch/arm/kernel/cpuidle.c
2474 +++ b/arch/arm/kernel/cpuidle.c
2475 @@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
2476 static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
2477 __used __section(__cpuidle_method_of_table_end);
2479 -static struct cpuidle_ops cpuidle_ops[NR_CPUS];
2480 +static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
2483 * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
2484 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2485 index cb4fb1e..dc7fcaf 100644
2486 --- a/arch/arm/kernel/entry-armv.S
2487 +++ b/arch/arm/kernel/entry-armv.S
2492 + .macro pax_enter_kernel
2493 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2494 + @ make aligned space for saved DACR
2497 + stmdb sp!, {r1, r2}
2498 + @ read DACR from cpu_domain into r1
2500 + @ assume 8K pages, since we have to split the immediate in two
2501 + bic r2, r2, #(0x1fc0)
2502 + bic r2, r2, #(0x3f)
2503 + ldr r1, [r2, #TI_CPU_DOMAIN]
2504 + @ store old DACR on stack
2506 +#ifdef CONFIG_PAX_KERNEXEC
2507 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2508 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2509 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2511 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2512 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2513 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2515 + @ write r1 to current_thread_info()->cpu_domain
2516 + str r1, [r2, #TI_CPU_DOMAIN]
2517 + @ write r1 to DACR
2518 + mcr p15, 0, r1, c3, c0, 0
2519 + @ instruction sync
2522 + ldmia sp!, {r1, r2}
2526 + .macro pax_open_userland
2527 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2529 + stmdb sp!, {r0, r1}
2530 + @ read DACR from cpu_domain into r1
2532 + @ assume 8K pages, since we have to split the immediate in two
2533 + bic r0, r0, #(0x1fc0)
2534 + bic r0, r0, #(0x3f)
2535 + ldr r1, [r0, #TI_CPU_DOMAIN]
2536 + @ set current DOMAIN_USER to DOMAIN_CLIENT
2537 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2538 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2539 + @ write r1 to current_thread_info()->cpu_domain
2540 + str r1, [r0, #TI_CPU_DOMAIN]
2541 + @ write r1 to DACR
2542 + mcr p15, 0, r1, c3, c0, 0
2543 + @ instruction sync
2546 + ldmia sp!, {r0, r1}
2550 + .macro pax_close_userland
2551 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2553 + stmdb sp!, {r0, r1}
2554 + @ read DACR from cpu_domain into r1
2556 + @ assume 8K pages, since we have to split the immediate in two
2557 + bic r0, r0, #(0x1fc0)
2558 + bic r0, r0, #(0x3f)
2559 + ldr r1, [r0, #TI_CPU_DOMAIN]
2560 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2561 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2562 + @ write r1 to current_thread_info()->cpu_domain
2563 + str r1, [r0, #TI_CPU_DOMAIN]
2564 + @ write r1 to DACR
2565 + mcr p15, 0, r1, c3, c0, 0
2566 + @ instruction sync
2569 + ldmia sp!, {r0, r1}
2574 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2576 @@ -92,11 +173,15 @@
2577 * Invalid mode handlers
2579 .macro inv_entry, reason
2583 sub sp, sp, #S_FRAME_SIZE
2584 ARM( stmib sp, {r1 - lr} )
2585 THUMB( stmia sp, {r0 - r12} )
2586 THUMB( str sp, [sp, #S_SP] )
2587 THUMB( str lr, [sp, #S_LR] )
2592 @@ -152,7 +237,11 @@ ENDPROC(__und_invalid)
2593 .macro svc_entry, stack_hole=0, trace=1
2595 UNWIND(.save {r0 - pc} )
2599 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2601 #ifdef CONFIG_THUMB2_KERNEL
2602 SPFIX( str r0, [sp] ) @ temporarily saved
2604 @@ -167,7 +256,12 @@ ENDPROC(__und_invalid)
2606 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2607 mov r6, #-1 @ "" "" "" ""
2608 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2609 + @ offset sp by 8 as done in pax_enter_kernel
2610 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2612 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2614 SPFIX( addeq r2, r2, #4 )
2615 str r3, [sp, #-4]! @ save the "real" r0 copied
2616 @ from the exception stack
2617 @@ -371,6 +465,9 @@ ENDPROC(__fiq_abt)
2618 .macro usr_entry, trace=1
2620 UNWIND(.cantunwind ) @ don't unwind the user space
2622 + pax_enter_kernel_user
2624 sub sp, sp, #S_FRAME_SIZE
2625 ARM( stmib sp, {r1 - r12} )
2626 THUMB( stmia sp, {r0 - r12} )
2627 @@ -481,7 +578,9 @@ __und_usr:
2628 tst r3, #PSR_T_BIT @ Thumb mode?
2630 sub r4, r2, #4 @ ARM instr at LR - 4
2633 + pax_close_userland
2634 ARM_BE8(rev r0, r0) @ little endian instruction
2636 @ r0 = 32-bit ARM instruction which caused the exception
2637 @@ -515,11 +614,15 @@ __und_usr_thumb:
2643 + pax_close_userland
2644 ARM_BE8(rev16 r5, r5) @ little endian instruction
2645 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2646 blo __und_usr_fault_16 @ 16bit undefined instruction
2649 + pax_close_userland
2650 ARM_BE8(rev16 r0, r0) @ little endian instruction
2651 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2652 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2653 @@ -549,7 +652,8 @@ ENDPROC(__und_usr)
2655 .pushsection .text.fixup, "ax"
2657 -4: str r4, [sp, #S_PC] @ retry current instruction
2658 +4: pax_close_userland
2659 + str r4, [sp, #S_PC] @ retry current instruction
2662 .pushsection __ex_table,"a"
2663 @@ -769,7 +873,7 @@ ENTRY(__switch_to)
2664 THUMB( str lr, [ip], #4 )
2665 ldr r4, [r2, #TI_TP_VALUE]
2666 ldr r5, [r2, #TI_TP_VALUE + 4]
2667 -#ifdef CONFIG_CPU_USE_DOMAINS
2668 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2669 ldr r6, [r2, #TI_CPU_DOMAIN]
2671 switch_tls r1, r4, r5, r3, r7
2672 @@ -778,7 +882,7 @@ ENTRY(__switch_to)
2673 ldr r8, =__stack_chk_guard
2674 ldr r7, [r7, #TSK_STACK_CANARY]
2676 -#ifdef CONFIG_CPU_USE_DOMAINS
2677 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2678 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2681 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2682 index b48dd4f..9f9a72f 100644
2683 --- a/arch/arm/kernel/entry-common.S
2684 +++ b/arch/arm/kernel/entry-common.S
2686 #include <asm/assembler.h>
2687 #include <asm/unistd.h>
2688 #include <asm/ftrace.h>
2689 +#include <asm/domain.h>
2690 #include <asm/unwind.h>
2692 +#include "entry-header.S"
2694 #ifdef CONFIG_NEED_RET_TO_USER
2695 #include <mach/entry-macro.S>
2697 .macro arch_ret_to_user, tmp1, tmp2
2698 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2700 + stmdb sp!, {r1, r2}
2701 + @ read DACR from cpu_domain into r1
2703 + @ assume 8K pages, since we have to split the immediate in two
2704 + bic r2, r2, #(0x1fc0)
2705 + bic r2, r2, #(0x3f)
2706 + ldr r1, [r2, #TI_CPU_DOMAIN]
2707 +#ifdef CONFIG_PAX_KERNEXEC
2708 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2709 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2710 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2712 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2713 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2714 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2715 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2717 + @ write r1 to current_thread_info()->cpu_domain
2718 + str r1, [r2, #TI_CPU_DOMAIN]
2719 + @ write r1 to DACR
2720 + mcr p15, 0, r1, c3, c0, 0
2721 + @ instruction sync
2724 + ldmia sp!, {r1, r2}
2729 -#include "entry-header.S"
2734 * This is the fast syscall return path. We do as little as
2735 @@ -174,6 +202,12 @@ ENTRY(vector_swi)
2736 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2740 + * do this here to avoid a performance hit of wrapping the code above
2741 + * that directly dereferences userland to parse the SWI instruction
2743 + pax_enter_kernel_user
2745 adr tbl, sys_call_table @ load syscall table pointer
2747 #if defined(CONFIG_OABI_COMPAT)
2748 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2749 index 1a0045a..9b4f34d 100644
2750 --- a/arch/arm/kernel/entry-header.S
2751 +++ b/arch/arm/kernel/entry-header.S
2752 @@ -196,6 +196,60 @@
2753 msr cpsr_c, \rtemp @ switch back to the SVC mode
2756 + .macro pax_enter_kernel_user
2757 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2759 + stmdb sp!, {r0, r1}
2760 + @ read DACR from cpu_domain into r1
2762 + @ assume 8K pages, since we have to split the immediate in two
2763 + bic r0, r0, #(0x1fc0)
2764 + bic r0, r0, #(0x3f)
2765 + ldr r1, [r0, #TI_CPU_DOMAIN]
2766 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2767 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2768 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2770 +#ifdef CONFIG_PAX_KERNEXEC
2771 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2772 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2773 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2775 + @ write r1 to current_thread_info()->cpu_domain
2776 + str r1, [r0, #TI_CPU_DOMAIN]
2777 + @ write r1 to DACR
2778 + mcr p15, 0, r1, c3, c0, 0
2779 + @ instruction sync
2782 + ldmia sp!, {r0, r1}
2786 + .macro pax_exit_kernel
2787 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2789 + stmdb sp!, {r0, r1}
2790 + @ read old DACR from stack into r1
2791 + ldr r1, [sp, #(8 + S_SP)]
2795 + @ write r1 to current_thread_info()->cpu_domain
2797 + @ assume 8K pages, since we have to split the immediate in two
2798 + bic r0, r0, #(0x1fc0)
2799 + bic r0, r0, #(0x3f)
2800 + str r1, [r0, #TI_CPU_DOMAIN]
2801 + @ write r1 to DACR
2802 + mcr p15, 0, r1, c3, c0, 0
2803 + @ instruction sync
2806 + ldmia sp!, {r0, r1}
2810 #ifndef CONFIG_THUMB2_KERNEL
2811 .macro svc_exit, rpsr, irq = 0
2814 blne trace_hardirqs_off
2820 msr spsr_cxsf, \rpsr
2821 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2822 @ We must avoid clrex due to Cortex-A15 erratum #830321
2824 blne trace_hardirqs_off
2830 ldr lr, [sp, #S_SP] @ top of the stack
2831 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2833 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2834 index 059c3da..8e45cfc 100644
2835 --- a/arch/arm/kernel/fiq.c
2836 +++ b/arch/arm/kernel/fiq.c
2837 @@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2838 void *base = vectors_page;
2839 unsigned offset = FIQ_OFFSET;
2841 + pax_open_kernel();
2842 memcpy(base + offset, start, length);
2843 + pax_close_kernel();
2845 if (!cache_is_vipt_nonaliasing())
2846 flush_icache_range((unsigned long)base + offset, offset +
2848 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2849 index 29e2991..7bc5757 100644
2850 --- a/arch/arm/kernel/head.S
2851 +++ b/arch/arm/kernel/head.S
2852 @@ -467,7 +467,7 @@ __enable_mmu:
2853 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2854 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2855 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2856 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2857 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2858 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2859 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2861 diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
2862 index 097e2e2..3927085 100644
2863 --- a/arch/arm/kernel/module-plts.c
2864 +++ b/arch/arm/kernel/module-plts.c
2865 @@ -30,17 +30,12 @@ struct plt_entries {
2866 u32 lit[PLT_ENT_COUNT];
2869 -static bool in_init(const struct module *mod, u32 addr)
2871 - return addr - (u32)mod->module_init < mod->init_size;
2874 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
2876 struct plt_entries *plt, *plt_end;
2879 - if (in_init(mod, loc)) {
2880 + if (within_module_init(loc, mod)) {
2881 plt = (void *)mod->arch.init_plt->sh_addr;
2882 plt_end = (void *)plt + mod->arch.init_plt->sh_size;
2883 count = &mod->arch.init_plt_count;
2884 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2885 index efdddcb..35e58f6 100644
2886 --- a/arch/arm/kernel/module.c
2887 +++ b/arch/arm/kernel/module.c
2892 -void *module_alloc(unsigned long size)
2893 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2895 - void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2896 - GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2899 + if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
2902 + p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2903 + GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2904 __builtin_return_address(0));
2905 if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
2907 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2908 - GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2909 + GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2910 __builtin_return_address(0));
2913 +void *module_alloc(unsigned long size)
2916 +#ifdef CONFIG_PAX_KERNEXEC
2917 + return __module_alloc(size, PAGE_KERNEL);
2919 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2924 +#ifdef CONFIG_PAX_KERNEXEC
2925 +void module_memfree_exec(void *module_region)
2927 + module_memfree(module_region);
2929 +EXPORT_SYMBOL(module_memfree_exec);
2931 +void *module_alloc_exec(unsigned long size)
2933 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2935 +EXPORT_SYMBOL(module_alloc_exec);
2940 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2941 index 69bda1a..755113a 100644
2942 --- a/arch/arm/kernel/patch.c
2943 +++ b/arch/arm/kernel/patch.c
2944 @@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2946 __acquire(&patch_lock);
2948 + pax_open_kernel();
2949 if (thumb2 && __opcode_is_thumb16(insn)) {
2950 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2952 @@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2953 *(u32 *)waddr = insn;
2956 + pax_close_kernel();
2958 if (waddr != addr) {
2959 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2960 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2961 index f192a2a..1a40523 100644
2962 --- a/arch/arm/kernel/process.c
2963 +++ b/arch/arm/kernel/process.c
2964 @@ -105,8 +105,8 @@ void __show_regs(struct pt_regs *regs)
2966 show_regs_print_info(KERN_DEFAULT);
2968 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2969 - print_symbol("LR is at %s\n", regs->ARM_lr);
2970 + printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2971 + printk("LR is at %pA\n", (void *)regs->ARM_lr);
2972 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2973 "sp : %08lx ip : %08lx fp : %08lx\n",
2974 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2975 @@ -283,12 +283,6 @@ unsigned long get_wchan(struct task_struct *p)
2979 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2981 - unsigned long range_end = mm->brk + 0x02000000;
2982 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2986 #ifdef CONFIG_KUSER_HELPERS
2988 @@ -304,7 +298,7 @@ static struct vm_area_struct gate_vma = {
2990 static int __init gate_vma_init(void)
2992 - gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2993 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2996 arch_initcall(gate_vma_init);
2997 @@ -333,91 +327,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2998 return is_gate_vma(vma) ? "[vectors]" : NULL;
3001 -/* If possible, provide a placement hint at a random offset from the
3002 - * stack for the sigpage and vdso pages.
3004 -static unsigned long sigpage_addr(const struct mm_struct *mm,
3005 - unsigned int npages)
3007 - unsigned long offset;
3008 - unsigned long first;
3009 - unsigned long last;
3010 - unsigned long addr;
3011 - unsigned int slots;
3013 - first = PAGE_ALIGN(mm->start_stack);
3015 - last = TASK_SIZE - (npages << PAGE_SHIFT);
3017 - /* No room after stack? */
3021 - /* Just enough room? */
3022 - if (first == last)
3025 - slots = ((last - first) >> PAGE_SHIFT) + 1;
3027 - offset = get_random_int() % slots;
3029 - addr = first + (offset << PAGE_SHIFT);
3034 -static struct page *signal_page;
3035 -extern struct page *get_signal_page(void);
3037 -static const struct vm_special_mapping sigpage_mapping = {
3038 - .name = "[sigpage]",
3039 - .pages = &signal_page,
3042 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3044 struct mm_struct *mm = current->mm;
3045 - struct vm_area_struct *vma;
3046 - unsigned long npages;
3047 - unsigned long addr;
3048 - unsigned long hint;
3052 - signal_page = get_signal_page();
3056 - npages = 1; /* for sigpage */
3057 - npages += vdso_total_pages;
3059 down_write(&mm->mmap_sem);
3060 - hint = sigpage_addr(mm, npages);
3061 - addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
3062 - if (IS_ERR_VALUE(addr)) {
3067 - vma = _install_special_mapping(mm, addr, PAGE_SIZE,
3068 - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3069 - &sigpage_mapping);
3071 - if (IS_ERR(vma)) {
3072 - ret = PTR_ERR(vma);
3076 - mm->context.sigpage = addr;
3078 - /* Unlike the sigpage, failure to install the vdso is unlikely
3079 - * to be fatal to the process, so no error check needed
3082 - arm_install_vdso(mm, addr + PAGE_SIZE);
3085 + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3086 up_write(&mm->mmap_sem);
3091 diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3092 index f90fdf4..24e8c84 100644
3093 --- a/arch/arm/kernel/psci.c
3094 +++ b/arch/arm/kernel/psci.c
3096 #include <asm/psci.h>
3097 #include <asm/system_misc.h>
3099 -struct psci_operations psci_ops;
3100 +struct psci_operations psci_ops __read_only;
3102 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3103 typedef int (*psci_initcall_t)(const struct device_node *);
3104 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3105 index ef9119f..31995a3 100644
3106 --- a/arch/arm/kernel/ptrace.c
3107 +++ b/arch/arm/kernel/ptrace.c
3108 @@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3112 +#ifdef CONFIG_GRKERNSEC_SETXID
3113 +extern void gr_delayed_cred_worker(void);
3116 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3118 current_thread_info()->syscall = scno;
3120 +#ifdef CONFIG_GRKERNSEC_SETXID
3121 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3122 + gr_delayed_cred_worker();
3125 /* Do the secure computing check first; failures should be fast. */
3126 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3127 if (secure_computing() == -1)
3128 diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
3129 index 3826935..8ed63ed 100644
3130 --- a/arch/arm/kernel/reboot.c
3131 +++ b/arch/arm/kernel/reboot.c
3132 @@ -122,6 +122,7 @@ void machine_power_off(void)
3140 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3141 index 36c18b7..0d78292 100644
3142 --- a/arch/arm/kernel/setup.c
3143 +++ b/arch/arm/kernel/setup.c
3144 @@ -108,21 +108,23 @@ EXPORT_SYMBOL(elf_hwcap);
3145 unsigned int elf_hwcap2 __read_mostly;
3146 EXPORT_SYMBOL(elf_hwcap2);
3148 +pteval_t __supported_pte_mask __read_only;
3149 +pmdval_t __supported_pmd_mask __read_only;
3152 -struct processor processor __read_mostly;
3153 +struct processor processor __read_only;
3156 -struct cpu_tlb_fns cpu_tlb __read_mostly;
3157 +struct cpu_tlb_fns cpu_tlb __read_only;
3160 -struct cpu_user_fns cpu_user __read_mostly;
3161 +struct cpu_user_fns cpu_user __read_only;
3164 -struct cpu_cache_fns cpu_cache __read_mostly;
3165 +struct cpu_cache_fns cpu_cache __read_only;
3167 #ifdef CONFIG_OUTER_CACHE
3168 -struct outer_cache_fns outer_cache __read_mostly;
3169 +struct outer_cache_fns outer_cache __read_only;
3170 EXPORT_SYMBOL(outer_cache);
3173 @@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3174 * Register 0 and check for VMSAv7 or PMSAv7 */
3175 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3176 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3177 - (mmfr0 & 0x000000f0) >= 0x00000030)
3178 + (mmfr0 & 0x000000f0) >= 0x00000030) {
3179 cpu_arch = CPU_ARCH_ARMv7;
3180 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3181 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3182 + __supported_pte_mask |= L_PTE_PXN;
3183 + __supported_pmd_mask |= PMD_PXNTABLE;
3185 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3186 (mmfr0 & 0x000000f0) == 0x00000020)
3187 cpu_arch = CPU_ARCH_ARMv6;
3189 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3190 index 586eef2..61aabd4 100644
3191 --- a/arch/arm/kernel/signal.c
3192 +++ b/arch/arm/kernel/signal.c
3195 extern const unsigned long sigreturn_codes[7];
3197 -static unsigned long signal_return_offset;
3199 #ifdef CONFIG_CRUNCH
3200 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3202 @@ -390,8 +388,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3203 * except when the MPU has protected the vectors
3206 - retcode = mm->context.sigpage + signal_return_offset +
3207 - (idx << 2) + thumb;
3208 + retcode = mm->context.sigpage + (idx << 2) + thumb;
3212 @@ -597,33 +594,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3213 } while (thread_flags & _TIF_WORK_MASK);
3217 -struct page *get_signal_page(void)
3219 - unsigned long ptr;
3221 - struct page *page;
3224 - page = alloc_pages(GFP_KERNEL, 0);
3229 - addr = page_address(page);
3231 - /* Give the signal return code some randomness */
3232 - offset = 0x200 + (get_random_int() & 0x7fc);
3233 - signal_return_offset = offset;
3236 - * Copy signal return handlers into the vector page, and
3237 - * set sigreturn to be a pointer to these.
3239 - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3241 - ptr = (unsigned long)addr + offset;
3242 - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3246 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3247 index 3d6b782..8b3baeb 100644
3248 --- a/arch/arm/kernel/smp.c
3249 +++ b/arch/arm/kernel/smp.c
3250 @@ -76,7 +76,7 @@ enum ipi_msg_type {
3252 static DECLARE_COMPLETION(cpu_running);
3254 -static struct smp_operations smp_ops;
3255 +static struct smp_operations smp_ops __read_only;
3257 void __init smp_set_ops(struct smp_operations *ops)
3259 diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3260 index b10e136..cb5edf9 100644
3261 --- a/arch/arm/kernel/tcm.c
3262 +++ b/arch/arm/kernel/tcm.c
3263 @@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3264 .virtual = ITCM_OFFSET,
3265 .pfn = __phys_to_pfn(ITCM_OFFSET),
3267 - .type = MT_MEMORY_RWX_ITCM,
3268 + .type = MT_MEMORY_RX_ITCM,
3272 @@ -362,7 +362,9 @@ no_dtcm:
3273 start = &__sitcm_text;
3274 end = &__eitcm_text;
3275 ram = &__itcm_start;
3276 + pax_open_kernel();
3277 memcpy(start, ram, itcm_code_sz);
3278 + pax_close_kernel();
3279 pr_debug("CPU ITCM: copied code from %p - %p\n",
3281 itcm_present = true;
3282 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3283 index d358226..bfd4019 100644
3284 --- a/arch/arm/kernel/traps.c
3285 +++ b/arch/arm/kernel/traps.c
3286 @@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3287 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3289 #ifdef CONFIG_KALLSYMS
3290 - printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3291 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3293 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3295 @@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3296 static int die_owner = -1;
3297 static unsigned int die_nest_count;
3299 +extern void gr_handle_kernel_exploit(void);
3301 static unsigned long oops_begin(void)
3304 @@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3305 panic("Fatal exception in interrupt");
3307 panic("Fatal exception");
3309 + gr_handle_kernel_exploit();
3314 @@ -870,7 +875,11 @@ void __init early_trap_init(void *vectors_base)
3315 kuser_init(vectors_base);
3317 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3318 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3320 +#ifndef CONFIG_PAX_MEMORY_UDEREF
3321 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3324 #else /* ifndef CONFIG_CPU_V7M */
3326 * on V7-M there is no need to copy the vector table to a dedicated
3327 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3328 index 8b60fde..8d986dd 100644
3329 --- a/arch/arm/kernel/vmlinux.lds.S
3330 +++ b/arch/arm/kernel/vmlinux.lds.S
3334 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3335 - defined(CONFIG_GENERIC_BUG)
3336 + defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3337 #define ARM_EXIT_KEEP(x) x
3338 #define ARM_EXIT_DISCARD(x)
3340 @@ -120,6 +120,8 @@ SECTIONS
3341 #ifdef CONFIG_DEBUG_RODATA
3342 . = ALIGN(1<<SECTION_SHIFT);
3344 + _etext = .; /* End of text section */
3349 @@ -150,8 +152,6 @@ SECTIONS
3353 - _etext = .; /* End of text and rodata section */
3355 #ifndef CONFIG_XIP_KERNEL
3356 # ifdef CONFIG_ARM_KERNMEM_PERMS
3357 . = ALIGN(1<<SECTION_SHIFT);
3358 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3359 index f9c341c..7430436 100644
3360 --- a/arch/arm/kvm/arm.c
3361 +++ b/arch/arm/kvm/arm.c
3362 @@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3363 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3365 /* The VMID used in the VTTBR */
3366 -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3367 +static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3368 static u8 kvm_next_vmid;
3369 static DEFINE_SPINLOCK(kvm_vmid_lock);
3371 @@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
3373 static bool need_new_vmid_gen(struct kvm *kvm)
3375 - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3376 + return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3380 @@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
3382 /* First user of a new VMID generation? */
3383 if (unlikely(kvm_next_vmid == 0)) {
3384 - atomic64_inc(&kvm_vmid_gen);
3385 + atomic64_inc_unchecked(&kvm_vmid_gen);
3389 @@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
3390 kvm_call_hyp(__kvm_flush_vm_context);
3393 - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3394 + kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3395 kvm->arch.vmid = kvm_next_vmid;
3398 @@ -1110,7 +1110,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3400 * Initialize Hyp-mode and memory mappings on all CPUs.
3402 -int kvm_arch_init(void *opaque)
3403 +int kvm_arch_init(const void *opaque)
3407 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3408 index 1710fd7..ec3e014 100644
3409 --- a/arch/arm/lib/clear_user.S
3410 +++ b/arch/arm/lib/clear_user.S
3415 -/* Prototype: int __clear_user(void *addr, size_t sz)
3416 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3417 * Purpose : clear some user memory
3418 * Params : addr - user memory address to clear
3419 * : sz - number of bytes to clear
3420 * Returns : number of bytes NOT cleared
3422 ENTRY(__clear_user_std)
3424 +WEAK(___clear_user)
3428 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3429 USER( strnebt r2, [r0])
3432 -ENDPROC(__clear_user)
3433 +ENDPROC(___clear_user)
3434 ENDPROC(__clear_user_std)
3436 .pushsection .text.fixup,"ax"
3437 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3438 index 7a235b9..73a0556 100644
3439 --- a/arch/arm/lib/copy_from_user.S
3440 +++ b/arch/arm/lib/copy_from_user.S
3445 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3446 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3454 -ENTRY(__copy_from_user)
3455 +ENTRY(___copy_from_user)
3457 #include "copy_template.S"
3459 -ENDPROC(__copy_from_user)
3460 +ENDPROC(___copy_from_user)
3462 .pushsection .fixup,"ax"
3464 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3465 index 6ee2f67..d1cce76 100644
3466 --- a/arch/arm/lib/copy_page.S
3467 +++ b/arch/arm/lib/copy_page.S
3469 * ASM optimised string functions
3471 #include <linux/linkage.h>
3472 +#include <linux/const.h>
3473 #include <asm/assembler.h>
3474 #include <asm/asm-offsets.h>
3475 #include <asm/cache.h>
3476 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3477 index 9648b06..19c333c 100644
3478 --- a/arch/arm/lib/copy_to_user.S
3479 +++ b/arch/arm/lib/copy_to_user.S
3484 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3485 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3492 ENTRY(__copy_to_user_std)
3493 -WEAK(__copy_to_user)
3494 +WEAK(___copy_to_user)
3496 #include "copy_template.S"
3498 -ENDPROC(__copy_to_user)
3499 +ENDPROC(___copy_to_user)
3500 ENDPROC(__copy_to_user_std)
3502 .pushsection .text.fixup,"ax"
3503 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3504 index 1d0957e..f708846 100644
3505 --- a/arch/arm/lib/csumpartialcopyuser.S
3506 +++ b/arch/arm/lib/csumpartialcopyuser.S
3508 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3511 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3512 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3513 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3514 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3516 #include "csumpartialcopygeneric.S"
3518 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3519 index 8044591..c9b2609 100644
3520 --- a/arch/arm/lib/delay.c
3521 +++ b/arch/arm/lib/delay.c
3524 * Default to the loop-based delay implementation.
3526 -struct arm_delay_ops arm_delay_ops = {
3527 +struct arm_delay_ops arm_delay_ops __read_only = {
3528 .delay = __loop_delay,
3529 .const_udelay = __loop_const_udelay,
3530 .udelay = __loop_udelay,
3531 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3532 index 4b39af2..9ae747d 100644
3533 --- a/arch/arm/lib/uaccess_with_memcpy.c
3534 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3535 @@ -85,7 +85,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
3539 -static unsigned long noinline
3540 +static unsigned long noinline __size_overflow(3)
3541 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
3544 @@ -136,7 +136,7 @@ out:
3548 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3549 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3552 * This test is stubbed out of the main function above to keep
3553 @@ -150,7 +150,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
3554 return __copy_to_user_memcpy(to, from, n);
3557 -static unsigned long noinline
3558 +static unsigned long noinline __size_overflow(2)
3559 __clear_user_memset(void __user *addr, unsigned long n)
3561 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
3562 @@ -190,7 +190,7 @@ out:
3566 -unsigned long __clear_user(void __user *addr, unsigned long n)
3567 +unsigned long ___clear_user(void __user *addr, unsigned long n)
3569 /* See rational for this in __copy_to_user() above. */
3571 diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3572 index f572219..2cf36d5 100644
3573 --- a/arch/arm/mach-exynos/suspend.c
3574 +++ b/arch/arm/mach-exynos/suspend.c
3575 @@ -732,8 +732,10 @@ void __init exynos_pm_init(void)
3576 tmp |= pm_data->wake_disable_mask;
3577 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3579 - exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3580 - exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3581 + pax_open_kernel();
3582 + *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3583 + *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3584 + pax_close_kernel();
3586 register_syscore_ops(&exynos_pm_syscore_ops);
3587 suspend_set_ops(&exynos_suspend_ops);
3588 diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3589 index e46e9ea..9141c83 100644
3590 --- a/arch/arm/mach-mvebu/coherency.c
3591 +++ b/arch/arm/mach-mvebu/coherency.c
3592 @@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3595 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3596 - * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3597 + * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3598 * is needed as a workaround for a deadlock issue between the PCIe
3599 * interface and the cache controller.
3601 @@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3602 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3604 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3605 - mtype = MT_UNCACHED;
3606 + mtype = MT_UNCACHED_RW;
3608 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3610 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3611 index b6443a4..20a0b74 100644
3612 --- a/arch/arm/mach-omap2/board-n8x0.c
3613 +++ b/arch/arm/mach-omap2/board-n8x0.c
3614 @@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3618 -struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3619 +struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3620 .late_init = n8x0_menelaus_late_init,
3623 diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3624 index 79f49d9..70bf184 100644
3625 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3626 +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3627 @@ -86,7 +86,7 @@ struct cpu_pm_ops {
3628 void (*resume)(void);
3629 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3630 void (*hotplug_restart)(void);
3634 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3635 static struct powerdomain *mpuss_pd;
3636 @@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3637 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3640 -struct cpu_pm_ops omap_pm_ops = {
3641 +static struct cpu_pm_ops omap_pm_ops __read_only = {
3642 .finish_suspend = default_finish_suspend,
3643 .resume = dummy_cpu_resume,
3644 .scu_prepare = dummy_scu_prepare,
3645 diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3646 index 5305ec7..6d74045 100644
3647 --- a/arch/arm/mach-omap2/omap-smp.c
3648 +++ b/arch/arm/mach-omap2/omap-smp.c
3650 #include <linux/device.h>
3651 #include <linux/smp.h>
3652 #include <linux/io.h>
3653 +#include <linux/irq.h>
3654 #include <linux/irqchip/arm-gic.h>
3656 #include <asm/smp_scu.h>
3657 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3658 index e1d2e99..d9b3177 100644
3659 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3660 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3661 @@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3665 -static struct notifier_block __refdata irq_hotplug_notifier = {
3666 +static struct notifier_block irq_hotplug_notifier = {
3667 .notifier_call = irq_cpu_hotplug_notify,
3670 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3671 index 4cb8fd9..5ce65bc 100644
3672 --- a/arch/arm/mach-omap2/omap_device.c
3673 +++ b/arch/arm/mach-omap2/omap_device.c
3674 @@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
3675 struct platform_device __init *omap_device_build(const char *pdev_name,
3677 struct omap_hwmod *oh,
3678 - void *pdata, int pdata_len)
3679 + const void *pdata, int pdata_len)
3681 struct omap_hwmod *ohs[] = { oh };
3683 @@ -532,7 +532,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3684 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3686 struct omap_hwmod **ohs,
3687 - int oh_cnt, void *pdata,
3688 + int oh_cnt, const void *pdata,
3692 diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3693 index 78c02b3..c94109a 100644
3694 --- a/arch/arm/mach-omap2/omap_device.h
3695 +++ b/arch/arm/mach-omap2/omap_device.h
3696 @@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3697 /* Core code interface */
3699 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3700 - struct omap_hwmod *oh, void *pdata,
3701 + struct omap_hwmod *oh, const void *pdata,
3704 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3705 struct omap_hwmod **oh, int oh_cnt,
3706 - void *pdata, int pdata_len);
3707 + const void *pdata, int pdata_len);
3709 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3710 struct omap_hwmod **ohs, int oh_cnt);
3711 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3712 index 486cc4d..8d1a0b7 100644
3713 --- a/arch/arm/mach-omap2/omap_hwmod.c
3714 +++ b/arch/arm/mach-omap2/omap_hwmod.c
3715 @@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
3716 int (*init_clkdm)(struct omap_hwmod *oh);
3717 void (*update_context_lost)(struct omap_hwmod *oh);
3718 int (*get_context_lost)(struct omap_hwmod *oh);
3722 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3723 -static struct omap_hwmod_soc_ops soc_ops;
3724 +static struct omap_hwmod_soc_ops soc_ops __read_only;
3726 /* omap_hwmod_list contains all registered struct omap_hwmods */
3727 static LIST_HEAD(omap_hwmod_list);
3728 diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3729 index 95fee54..cfa9cf1 100644
3730 --- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3731 +++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3734 #include <linux/kernel.h>
3735 #include <linux/init.h>
3736 +#include <asm/pgtable.h>
3738 #include "powerdomain.h"
3740 @@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3742 void __init am43xx_powerdomains_init(void)
3744 - omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3745 + pax_open_kernel();
3746 + *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3747 + pax_close_kernel();
3748 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3749 pwrdm_register_pwrdms(powerdomains_am43xx);
3750 pwrdm_complete_init();
3751 diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3752 index ff0a68c..b312aa0 100644
3753 --- a/arch/arm/mach-omap2/wd_timer.c
3754 +++ b/arch/arm/mach-omap2/wd_timer.c
3755 @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3756 struct omap_hwmod *oh;
3757 char *oh_name = "wd_timer2";
3758 char *dev_name = "omap_wdt";
3759 - struct omap_wd_timer_platform_data pdata;
3760 + static struct omap_wd_timer_platform_data pdata = {
3761 + .read_reset_sources = prm_read_reset_sources
3764 if (!cpu_class_is_omap2() || of_have_populated_dt())
3766 @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3770 - pdata.read_reset_sources = prm_read_reset_sources;
3772 pdev = omap_device_build(dev_name, id, oh, &pdata,
3773 sizeof(struct omap_wd_timer_platform_data));
3774 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3775 diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
3776 index b0790fc..71eb21f 100644
3777 --- a/arch/arm/mach-shmobile/platsmp-apmu.c
3778 +++ b/arch/arm/mach-shmobile/platsmp-apmu.c
3780 #include <asm/proc-fns.h>
3781 #include <asm/smp_plat.h>
3782 #include <asm/suspend.h>
3783 +#include <asm/pgtable.h>
3785 #include "platsmp-apmu.h"
3787 @@ -233,6 +234,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
3789 void __init shmobile_smp_apmu_suspend_init(void)
3791 - shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
3792 + pax_open_kernel();
3793 + *(void **)&shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
3794 + pax_close_kernel();
3797 diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
3798 index 34608fc..344d7c0 100644
3799 --- a/arch/arm/mach-shmobile/pm-r8a7740.c
3800 +++ b/arch/arm/mach-shmobile/pm-r8a7740.c
3802 #include <linux/console.h>
3803 #include <linux/io.h>
3804 #include <linux/suspend.h>
3805 +#include <asm/pgtable.h>
3808 #include "pm-rmobile.h"
3809 @@ -117,7 +118,9 @@ static int r8a7740_enter_suspend(suspend_state_t suspend_state)
3811 static void r8a7740_suspend_init(void)
3813 - shmobile_suspend_ops.enter = r8a7740_enter_suspend;
3814 + pax_open_kernel();
3815 + *(void **)&shmobile_suspend_ops.enter = r8a7740_enter_suspend;
3816 + pax_close_kernel();
3819 static void r8a7740_suspend_init(void) {}
3820 diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c
3821 index a7e4668..83334f33 100644
3822 --- a/arch/arm/mach-shmobile/pm-sh73a0.c
3823 +++ b/arch/arm/mach-shmobile/pm-sh73a0.c
3827 #include <linux/suspend.h>
3828 +#include <asm/pgtable.h>
3831 #ifdef CONFIG_SUSPEND
3832 @@ -20,7 +21,9 @@ static int sh73a0_enter_suspend(suspend_state_t suspend_state)
3834 static void sh73a0_suspend_init(void)
3836 - shmobile_suspend_ops.enter = sh73a0_enter_suspend;
3837 + pax_open_kernel();
3838 + *(void **)&shmobile_suspend_ops.enter = sh73a0_enter_suspend;
3839 + pax_close_kernel();
3842 static void sh73a0_suspend_init(void) {}
3843 diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3844 index 7469347..1ecc350 100644
3845 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3846 +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3847 @@ -177,7 +177,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3848 bool entered_lp2 = false;
3850 if (tegra_pending_sgi())
3851 - ACCESS_ONCE(abort_flag) = true;
3852 + ACCESS_ONCE_RW(abort_flag) = true;
3854 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3856 diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3857 index 3b9098d..15b390f 100644
3858 --- a/arch/arm/mach-tegra/irq.c
3859 +++ b/arch/arm/mach-tegra/irq.c
3861 #include <linux/cpu_pm.h>
3862 #include <linux/interrupt.h>
3863 #include <linux/io.h>
3864 +#include <linux/irq.h>
3865 #include <linux/irqchip/arm-gic.h>
3866 #include <linux/irq.h>
3867 #include <linux/kernel.h>
3868 diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3869 index 8538910..2f39bc4 100644
3870 --- a/arch/arm/mach-ux500/pm.c
3871 +++ b/arch/arm/mach-ux500/pm.c
3875 #include <linux/kernel.h>
3876 +#include <linux/irq.h>
3877 #include <linux/irqchip/arm-gic.h>
3878 #include <linux/delay.h>
3879 #include <linux/io.h>
3880 diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3881 index f66816c..228b951 100644
3882 --- a/arch/arm/mach-zynq/platsmp.c
3883 +++ b/arch/arm/mach-zynq/platsmp.c
3885 #include <linux/io.h>
3886 #include <asm/cacheflush.h>
3887 #include <asm/smp_scu.h>
3888 +#include <linux/irq.h>
3889 #include <linux/irqchip/arm-gic.h>
3892 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3893 index 7c6b976..055db09 100644
3894 --- a/arch/arm/mm/Kconfig
3895 +++ b/arch/arm/mm/Kconfig
3896 @@ -446,6 +446,7 @@ config CPU_32v5
3900 + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3901 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3904 @@ -600,6 +601,7 @@ config CPU_CP15_MPU
3906 config CPU_USE_DOMAINS
3908 + depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3910 This option enables or disables the use of domain switching
3911 via the set_fs() function.
3912 @@ -818,7 +820,7 @@ config NEED_KUSER_HELPERS
3914 config KUSER_HELPERS
3915 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3917 + depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3920 Warning: disabling this option may break user programs.
3921 @@ -832,7 +834,7 @@ config KUSER_HELPERS
3922 See Documentation/arm/kernel_user_helpers.txt for details.
3924 However, the fixed address nature of these helpers can be used
3925 - by ROP (return orientated programming) authors when creating
3926 + by ROP (Return Oriented Programming) authors when creating
3929 If all of the binaries and libraries which run on your platform
3930 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3931 index 9769f1e..16aaa55 100644
3932 --- a/arch/arm/mm/alignment.c
3933 +++ b/arch/arm/mm/alignment.c
3934 @@ -216,10 +216,12 @@ union offset_union {
3935 #define __get16_unaligned_check(ins,val,addr) \
3937 unsigned int err = 0, v, a = addr; \
3938 + pax_open_userland(); \
3939 __get8_unaligned_check(ins,v,a,err); \
3940 val = v << ((BE) ? 8 : 0); \
3941 __get8_unaligned_check(ins,v,a,err); \
3942 val |= v << ((BE) ? 0 : 8); \
3943 + pax_close_userland(); \
3947 @@ -233,6 +235,7 @@ union offset_union {
3948 #define __get32_unaligned_check(ins,val,addr) \
3950 unsigned int err = 0, v, a = addr; \
3951 + pax_open_userland(); \
3952 __get8_unaligned_check(ins,v,a,err); \
3953 val = v << ((BE) ? 24 : 0); \
3954 __get8_unaligned_check(ins,v,a,err); \
3955 @@ -241,6 +244,7 @@ union offset_union {
3956 val |= v << ((BE) ? 8 : 16); \
3957 __get8_unaligned_check(ins,v,a,err); \
3958 val |= v << ((BE) ? 0 : 24); \
3959 + pax_close_userland(); \
3963 @@ -254,6 +258,7 @@ union offset_union {
3964 #define __put16_unaligned_check(ins,val,addr) \
3966 unsigned int err = 0, v = val, a = addr; \
3967 + pax_open_userland(); \
3968 __asm__( FIRST_BYTE_16 \
3969 ARM( "1: "ins" %1, [%2], #1\n" ) \
3970 THUMB( "1: "ins" %1, [%2]\n" ) \
3971 @@ -273,6 +278,7 @@ union offset_union {
3973 : "=r" (err), "=&r" (v), "=&r" (a) \
3974 : "0" (err), "1" (v), "2" (a)); \
3975 + pax_close_userland(); \
3979 @@ -286,6 +292,7 @@ union offset_union {
3980 #define __put32_unaligned_check(ins,val,addr) \
3982 unsigned int err = 0, v = val, a = addr; \
3983 + pax_open_userland(); \
3984 __asm__( FIRST_BYTE_32 \
3985 ARM( "1: "ins" %1, [%2], #1\n" ) \
3986 THUMB( "1: "ins" %1, [%2]\n" ) \
3987 @@ -315,6 +322,7 @@ union offset_union {
3989 : "=r" (err), "=&r" (v), "=&r" (a) \
3990 : "0" (err), "1" (v), "2" (a)); \
3991 + pax_close_userland(); \
3995 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3996 index 71b3d33..8af9ade 100644
3997 --- a/arch/arm/mm/cache-l2x0.c
3998 +++ b/arch/arm/mm/cache-l2x0.c
3999 @@ -44,7 +44,7 @@ struct l2c_init_data {
4000 void (*configure)(void __iomem *);
4001 void (*unlock)(void __iomem *, unsigned);
4002 struct outer_cache_fns outer_cache;
4006 #define CACHE_LINE_SIZE 32
4008 diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
4009 index 845769e..4278fd7 100644
4010 --- a/arch/arm/mm/context.c
4011 +++ b/arch/arm/mm/context.c
4013 #define NUM_USER_ASIDS ASID_FIRST_VERSION
4015 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4016 -static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4017 +static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4018 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4020 static DEFINE_PER_CPU(atomic64_t, active_asids);
4021 @@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4023 static u32 cur_idx = 1;
4024 u64 asid = atomic64_read(&mm->context.id);
4025 - u64 generation = atomic64_read(&asid_generation);
4026 + u64 generation = atomic64_read_unchecked(&asid_generation);
4030 @@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4032 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4033 if (asid == NUM_USER_ASIDS) {
4034 - generation = atomic64_add_return(ASID_FIRST_VERSION,
4035 + generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4038 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4039 @@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4040 cpu_set_reserved_ttbr0();
4042 asid = atomic64_read(&mm->context.id);
4043 - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4044 + if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4045 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4046 goto switch_mm_fastpath;
4048 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4049 /* Check that our ASID belongs to the current generation. */
4050 asid = atomic64_read(&mm->context.id);
4051 - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4052 + if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4053 asid = new_context(mm, cpu);
4054 atomic64_set(&mm->context.id, asid);
4056 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4057 index 0d629b8..01867c8 100644
4058 --- a/arch/arm/mm/fault.c
4059 +++ b/arch/arm/mm/fault.c
4061 #include <asm/system_misc.h>
4062 #include <asm/system_info.h>
4063 #include <asm/tlbflush.h>
4064 +#include <asm/sections.h>
4068 @@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4069 if (fixup_exception(regs))
4072 +#ifdef CONFIG_PAX_MEMORY_UDEREF
4073 + if (addr < TASK_SIZE) {
4074 + if (current->signal->curr_ip)
4075 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current),
4076 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4078 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4079 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4083 +#ifdef CONFIG_PAX_KERNEXEC
4084 + if ((fsr & FSR_WRITE) &&
4085 + (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4086 + (MODULES_VADDR <= addr && addr < MODULES_END)))
4088 + if (current->signal->curr_ip)
4089 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current),
4090 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4092 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4093 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4098 * No handler, we'll have to terminate things with extreme prejudice.
4100 @@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4104 +#ifdef CONFIG_PAX_PAGEEXEC
4105 + if (fsr & FSR_LNX_PF) {
4106 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4107 + do_group_exit(SIGKILL);
4111 tsk->thread.address = addr;
4112 tsk->thread.error_code = fsr;
4113 tsk->thread.trap_no = 14;
4114 @@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4116 #endif /* CONFIG_MMU */
4118 +#ifdef CONFIG_PAX_PAGEEXEC
4119 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4123 + printk(KERN_ERR "PAX: bytes at PC: ");
4124 + for (i = 0; i < 20; i++) {
4126 + if (get_user(c, (__force unsigned char __user *)pc+i))
4127 + printk(KERN_CONT "?? ");
4129 + printk(KERN_CONT "%02x ", c);
4133 + printk(KERN_ERR "PAX: bytes at SP-4: ");
4134 + for (i = -1; i < 20; i++) {
4136 + if (get_user(c, (__force unsigned long __user *)sp+i))
4137 + printk(KERN_CONT "???????? ");
4139 + printk(KERN_CONT "%08lx ", c);
4146 * First Level Translation Fault Handler
4148 @@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4149 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4150 struct siginfo info;
4152 +#ifdef CONFIG_PAX_MEMORY_UDEREF
4153 + if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4154 + if (current->signal->curr_ip)
4155 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current),
4156 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4158 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4159 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4164 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4168 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4169 inf->name, fsr, addr);
4170 show_pte(current->mm, addr);
4171 @@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4172 ifsr_info[nr].name = name;
4175 +asmlinkage int sys_sigreturn(struct pt_regs *regs);
4176 +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4178 asmlinkage void __exception
4179 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4181 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4182 struct siginfo info;
4183 + unsigned long pc = instruction_pointer(regs);
4185 + if (user_mode(regs)) {
4186 + unsigned long sigpage = current->mm->context.sigpage;
4188 + if (sigpage <= pc && pc < sigpage + 7*4) {
4189 + if (pc < sigpage + 3*4)
4190 + sys_sigreturn(regs);
4192 + sys_rt_sigreturn(regs);
4195 + if (pc == 0xffff0f60UL) {
4197 + * PaX: __kuser_cmpxchg64 emulation
4200 + //regs->ARM_pc = regs->ARM_lr;
4203 + if (pc == 0xffff0fa0UL) {
4205 + * PaX: __kuser_memory_barrier emulation
4207 + // dmb(); implied by the exception
4208 + regs->ARM_pc = regs->ARM_lr;
4211 + if (pc == 0xffff0fc0UL) {
4213 + * PaX: __kuser_cmpxchg emulation
4219 + //op = FUTEX_OP_SET << 28;
4220 + //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4221 + //regs->ARM_r0 = old != new;
4222 + //regs->ARM_pc = regs->ARM_lr;
4225 + if (pc == 0xffff0fe0UL) {
4227 + * PaX: __kuser_get_tls emulation
4229 + regs->ARM_r0 = current_thread_info()->tp_value[0];
4230 + regs->ARM_pc = regs->ARM_lr;
4235 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4236 + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4237 + if (current->signal->curr_ip)
4238 + printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current),
4239 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4240 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4242 + printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4243 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4244 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4249 +#ifdef CONFIG_PAX_REFCOUNT
4250 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4251 +#ifdef CONFIG_THUMB2_KERNEL
4252 + unsigned short bkpt;
4254 + if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4256 + unsigned int bkpt;
4258 + if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4260 + current->thread.error_code = ifsr;
4261 + current->thread.trap_no = 0;
4262 + pax_report_refcount_overflow(regs);
4263 + fixup_exception(regs);
4269 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4273 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4274 inf->name, ifsr, addr);
4276 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4277 index cf08bdf..772656c 100644
4278 --- a/arch/arm/mm/fault.h
4279 +++ b/arch/arm/mm/fault.h
4283 * Fault status register encodings. We steal bit 31 for our own purposes.
4284 + * Set when the FSR value is from an instruction fault.
4286 #define FSR_LNX_PF (1 << 31)
4287 #define FSR_WRITE (1 << 11)
4288 @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4292 +/* valid for LPAE and !LPAE */
4293 +static inline int is_xn_fault(unsigned int fsr)
4295 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
4298 +static inline int is_domain_fault(unsigned int fsr)
4300 + return ((fsr_fs(fsr) & 0xD) == 0x9);
4303 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4304 unsigned long search_exception_table(unsigned long addr);
4306 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4307 index 8a63b4c..6b04370 100644
4308 --- a/arch/arm/mm/init.c
4309 +++ b/arch/arm/mm/init.c
4310 @@ -710,7 +710,46 @@ void free_tcmmem(void)
4312 #ifdef CONFIG_HAVE_TCM
4313 extern char __tcm_start, __tcm_end;
4316 +#ifdef CONFIG_PAX_KERNEXEC
4317 + unsigned long addr;
4321 + int cpu_arch = cpu_architecture();
4322 + unsigned int cr = get_cr();
4324 + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4325 + /* make pages tables, etc before .text NX */
4326 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4327 + pgd = pgd_offset_k(addr);
4328 + pud = pud_offset(pgd, addr);
4329 + pmd = pmd_offset(pud, addr);
4330 + __section_update(pmd, addr, PMD_SECT_XN);
4332 + /* make init NX */
4333 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4334 + pgd = pgd_offset_k(addr);
4335 + pud = pud_offset(pgd, addr);
4336 + pmd = pmd_offset(pud, addr);
4337 + __section_update(pmd, addr, PMD_SECT_XN);
4339 + /* make kernel code/rodata RX */
4340 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4341 + pgd = pgd_offset_k(addr);
4342 + pud = pud_offset(pgd, addr);
4343 + pmd = pmd_offset(pud, addr);
4344 +#ifdef CONFIG_ARM_LPAE
4345 + __section_update(pmd, addr, PMD_SECT_RDONLY);
4347 + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4353 +#ifdef CONFIG_HAVE_TCM
4354 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4355 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4357 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4358 index 0c81056..97279f7 100644
4359 --- a/arch/arm/mm/ioremap.c
4360 +++ b/arch/arm/mm/ioremap.c
4361 @@ -405,9 +405,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4365 - mtype = MT_MEMORY_RWX;
4366 + mtype = MT_MEMORY_RX;
4368 - mtype = MT_MEMORY_RWX_NONCACHED;
4369 + mtype = MT_MEMORY_RX_NONCACHED;
4371 return __arm_ioremap_caller(phys_addr, size, mtype,
4372 __builtin_return_address(0));
4373 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4374 index 407dc78..047ce9d 100644
4375 --- a/arch/arm/mm/mmap.c
4376 +++ b/arch/arm/mm/mmap.c
4377 @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4378 struct vm_area_struct *vma;
4380 int aliasing = cache_is_vipt_aliasing();
4381 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4382 struct vm_unmapped_area_info info;
4385 @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4386 if (len > TASK_SIZE)
4389 +#ifdef CONFIG_PAX_RANDMMAP
4390 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4395 addr = COLOUR_ALIGN(addr, pgoff);
4396 @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4397 addr = PAGE_ALIGN(addr);
4399 vma = find_vma(mm, addr);
4400 - if (TASK_SIZE - len >= addr &&
4401 - (!vma || addr + len <= vma->vm_start))
4402 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4406 @@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4407 info.high_limit = TASK_SIZE;
4408 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4409 info.align_offset = pgoff << PAGE_SHIFT;
4410 + info.threadstack_offset = offset;
4411 return vm_unmapped_area(&info);
4414 @@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4415 unsigned long addr = addr0;
4417 int aliasing = cache_is_vipt_aliasing();
4418 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4419 struct vm_unmapped_area_info info;
4422 @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4426 +#ifdef CONFIG_PAX_RANDMMAP
4427 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4430 /* requesting a specific address */
4433 @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4435 addr = PAGE_ALIGN(addr);
4436 vma = find_vma(mm, addr);
4437 - if (TASK_SIZE - len >= addr &&
4438 - (!vma || addr + len <= vma->vm_start))
4439 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4443 @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4444 info.high_limit = mm->mmap_base;
4445 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4446 info.align_offset = pgoff << PAGE_SHIFT;
4447 + info.threadstack_offset = offset;
4448 addr = vm_unmapped_area(&info);
4451 @@ -183,14 +193,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4453 unsigned long random_factor = 0UL;
4455 +#ifdef CONFIG_PAX_RANDMMAP
4456 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4459 if (current->flags & PF_RANDOMIZE)
4460 random_factor = arch_mmap_rnd();
4462 if (mmap_is_legacy()) {
4463 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4465 +#ifdef CONFIG_PAX_RANDMMAP
4466 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4467 + mm->mmap_base += mm->delta_mmap;
4470 mm->get_unmapped_area = arch_get_unmapped_area;
4472 mm->mmap_base = mmap_base(random_factor);
4474 +#ifdef CONFIG_PAX_RANDMMAP
4475 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4476 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4479 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4482 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4483 index 870838a..070df1d 100644
4484 --- a/arch/arm/mm/mmu.c
4485 +++ b/arch/arm/mm/mmu.c
4490 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4491 +void modify_domain(unsigned int dom, unsigned int type)
4493 + struct thread_info *thread = current_thread_info();
4494 + unsigned int domain = thread->cpu_domain;
4496 + * DOMAIN_MANAGER might be defined to some other value,
4497 + * use the arch-defined constant
4499 + domain &= ~domain_val(dom, 3);
4500 + thread->cpu_domain = domain | domain_val(dom, type);
4501 + set_domain(thread->cpu_domain);
4503 +EXPORT_SYMBOL(modify_domain);
4507 * empty_zero_page is a special page that is used for
4508 * zero-initialized data and COW.
4509 @@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4510 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4511 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4513 -static struct mem_type mem_types[] = {
4514 +#ifdef CONFIG_PAX_KERNEXEC
4515 +#define L_PTE_KERNEXEC L_PTE_RDONLY
4516 +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4518 +#define L_PTE_KERNEXEC L_PTE_DIRTY
4519 +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4522 +static struct mem_type mem_types[] __read_only = {
4523 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4524 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4526 @@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4527 .prot_sect = PROT_SECT_DEVICE,
4528 .domain = DOMAIN_IO,
4531 + [MT_UNCACHED_RW] = {
4532 .prot_pte = PROT_PTE_DEVICE,
4533 .prot_l1 = PMD_TYPE_TABLE,
4534 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4535 .domain = DOMAIN_IO,
4537 - [MT_CACHECLEAN] = {
4538 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4539 + [MT_CACHECLEAN_RO] = {
4540 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4541 .domain = DOMAIN_KERNEL,
4543 #ifndef CONFIG_ARM_LPAE
4544 - [MT_MINICLEAN] = {
4545 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4546 + [MT_MINICLEAN_RO] = {
4547 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4548 .domain = DOMAIN_KERNEL,
4551 @@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4552 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4554 .prot_l1 = PMD_TYPE_TABLE,
4555 - .domain = DOMAIN_USER,
4556 + .domain = DOMAIN_VECTORS,
4558 [MT_HIGH_VECTORS] = {
4559 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4560 L_PTE_USER | L_PTE_RDONLY,
4561 .prot_l1 = PMD_TYPE_TABLE,
4562 - .domain = DOMAIN_USER,
4563 + .domain = DOMAIN_VECTORS,
4565 - [MT_MEMORY_RWX] = {
4566 + [__MT_MEMORY_RWX] = {
4567 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4568 .prot_l1 = PMD_TYPE_TABLE,
4569 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4570 @@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4571 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4572 .domain = DOMAIN_KERNEL,
4575 - .prot_sect = PMD_TYPE_SECT,
4576 + [MT_MEMORY_RX] = {
4577 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4578 + .prot_l1 = PMD_TYPE_TABLE,
4579 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4580 + .domain = DOMAIN_KERNEL,
4583 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4584 .domain = DOMAIN_KERNEL,
4586 - [MT_MEMORY_RWX_NONCACHED] = {
4587 + [MT_MEMORY_RW_NONCACHED] = {
4588 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4589 L_PTE_MT_BUFFERABLE,
4590 .prot_l1 = PMD_TYPE_TABLE,
4591 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4592 .domain = DOMAIN_KERNEL,
4594 + [MT_MEMORY_RX_NONCACHED] = {
4595 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4596 + L_PTE_MT_BUFFERABLE,
4597 + .prot_l1 = PMD_TYPE_TABLE,
4598 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4599 + .domain = DOMAIN_KERNEL,
4601 [MT_MEMORY_RW_DTCM] = {
4602 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4604 @@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4605 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4606 .domain = DOMAIN_KERNEL,
4608 - [MT_MEMORY_RWX_ITCM] = {
4609 - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4610 + [MT_MEMORY_RX_ITCM] = {
4611 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4612 .prot_l1 = PMD_TYPE_TABLE,
4613 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4614 .domain = DOMAIN_KERNEL,
4616 [MT_MEMORY_RW_SO] = {
4617 @@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4618 * Mark cache clean areas and XIP ROM read only
4619 * from SVC mode and no access from userspace.
4621 - mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4622 - mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4623 - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4624 + mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4625 +#ifdef CONFIG_PAX_KERNEXEC
4626 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4627 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4628 + mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4630 + mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4631 + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4635 @@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4636 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4637 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4638 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4639 - mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4640 - mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4641 + mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4642 + mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4643 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4644 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4645 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4646 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4647 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4648 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4649 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4650 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4651 + mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4652 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4653 + mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4657 @@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4658 if (cpu_arch >= CPU_ARCH_ARMv6) {
4659 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4660 /* Non-cacheable Normal is XCB = 001 */
4661 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4662 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4663 + PMD_SECT_BUFFERED;
4664 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4667 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4668 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4669 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4671 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4675 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4676 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4677 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4680 #ifdef CONFIG_ARM_LPAE
4681 @@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4682 user_pgprot |= PTE_EXT_PXN;
4685 + user_pgprot |= __supported_pte_mask;
4687 for (i = 0; i < 16; i++) {
4688 pteval_t v = pgprot_val(protection_map[i]);
4689 protection_map[i] = __pgprot(v | user_pgprot);
4690 @@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4692 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4693 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4694 - mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4695 - mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4696 + mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4697 + mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4698 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4699 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4700 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4701 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4702 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4703 - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4704 - mem_types[MT_ROM].prot_sect |= cp->pmd;
4705 + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4706 + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4707 + mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4711 - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4712 + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4716 - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4717 + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4720 pr_info("Memory policy: %sData cache %s\n",
4721 @@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4725 - if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4726 + if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4727 md->virtual >= PAGE_OFFSET &&
4728 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4729 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4730 @@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
4731 * called function. This means you can't use any function or debugging
4732 * method which may touch any device, otherwise the kernel _will_ crash.
4735 +static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4737 static void __init devicemaps_init(const struct machine_desc *mdesc)
4739 struct map_desc map;
4744 - * Allocate the vector page early.
4746 - vectors = early_alloc(PAGE_SIZE * 2);
4748 - early_trap_init(vectors);
4749 + early_trap_init(&vectors);
4751 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4752 pmd_clear(pmd_off_k(addr));
4753 @@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4754 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4755 map.virtual = MODULES_VADDR;
4756 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4757 - map.type = MT_ROM;
4758 + map.type = MT_ROM_RX;
4759 create_mapping(&map);
4762 @@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4763 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4764 map.virtual = FLUSH_BASE;
4766 - map.type = MT_CACHECLEAN;
4767 + map.type = MT_CACHECLEAN_RO;
4768 create_mapping(&map);
4770 #ifdef FLUSH_BASE_MINICACHE
4771 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4772 map.virtual = FLUSH_BASE_MINICACHE;
4774 - map.type = MT_MINICLEAN;
4775 + map.type = MT_MINICLEAN_RO;
4776 create_mapping(&map);
4779 @@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4780 * location (0xffff0000). If we aren't using high-vectors, also
4781 * create a mapping at the low-vectors virtual address.
4783 - map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4784 + map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4785 map.virtual = 0xffff0000;
4786 map.length = PAGE_SIZE;
4787 #ifdef CONFIG_KUSER_HELPERS
4788 @@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
4789 static void __init map_lowmem(void)
4791 struct memblock_region *reg;
4792 +#ifndef CONFIG_PAX_KERNEXEC
4793 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4794 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4797 /* Map all the lowmem memory banks. */
4798 for_each_memblock(memory, reg) {
4799 @@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
4803 +#ifdef CONFIG_PAX_KERNEXEC
4804 + map.pfn = __phys_to_pfn(start);
4805 + map.virtual = __phys_to_virt(start);
4806 + map.length = end - start;
4808 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4809 + struct map_desc kernel;
4810 + struct map_desc initmap;
4812 + /* when freeing initmem we will make this RW */
4813 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4814 + initmap.virtual = (unsigned long)__init_begin;
4815 + initmap.length = _sdata - __init_begin;
4816 + initmap.type = __MT_MEMORY_RWX;
4817 + create_mapping(&initmap);
4819 + /* when freeing initmem we will make this RX */
4820 + kernel.pfn = __phys_to_pfn(__pa(_stext));
4821 + kernel.virtual = (unsigned long)_stext;
4822 + kernel.length = __init_begin - _stext;
4823 + kernel.type = __MT_MEMORY_RWX;
4824 + create_mapping(&kernel);
4826 + if (map.virtual < (unsigned long)_stext) {
4827 + map.length = (unsigned long)_stext - map.virtual;
4828 + map.type = __MT_MEMORY_RWX;
4829 + create_mapping(&map);
4832 + map.pfn = __phys_to_pfn(__pa(_sdata));
4833 + map.virtual = (unsigned long)_sdata;
4834 + map.length = end - __pa(_sdata);
4837 + map.type = MT_MEMORY_RW;
4838 + create_mapping(&map);
4840 if (end < kernel_x_start) {
4841 map.pfn = __phys_to_pfn(start);
4842 map.virtual = __phys_to_virt(start);
4843 map.length = end - start;
4844 - map.type = MT_MEMORY_RWX;
4845 + map.type = __MT_MEMORY_RWX;
4847 create_mapping(&map);
4848 } else if (start >= kernel_x_end) {
4849 @@ -1377,7 +1470,7 @@ static void __init map_lowmem(void)
4850 map.pfn = __phys_to_pfn(kernel_x_start);
4851 map.virtual = __phys_to_virt(kernel_x_start);
4852 map.length = kernel_x_end - kernel_x_start;
4853 - map.type = MT_MEMORY_RWX;
4854 + map.type = __MT_MEMORY_RWX;
4856 create_mapping(&map);
4858 @@ -1390,6 +1483,7 @@ static void __init map_lowmem(void)
4859 create_mapping(&map);
4866 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4867 index c011e22..92a0260 100644
4868 --- a/arch/arm/net/bpf_jit_32.c
4869 +++ b/arch/arm/net/bpf_jit_32.c
4871 #include <asm/cacheflush.h>
4872 #include <asm/hwcap.h>
4873 #include <asm/opcodes.h>
4874 +#include <asm/pgtable.h>
4876 #include "bpf_jit_32.h"
4878 @@ -72,54 +73,38 @@ struct jit_ctx {
4882 +#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4883 +int bpf_jit_enable __read_only;
4885 int bpf_jit_enable __read_mostly;
4888 -static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
4889 - unsigned int size)
4891 - void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
4895 - memcpy(ret, ptr, size);
4899 -static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
4900 +static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4906 - err = call_neg_helper(skb, offset, &ret, 1);
4908 - err = skb_copy_bits(skb, offset, &ret, 1);
4909 + err = skb_copy_bits(skb, offset, &ret, 1);
4911 return (u64)err << 32 | ret;
4914 -static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
4915 +static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
4921 - err = call_neg_helper(skb, offset, &ret, 2);
4923 - err = skb_copy_bits(skb, offset, &ret, 2);
4924 + err = skb_copy_bits(skb, offset, &ret, 2);
4926 return (u64)err << 32 | ntohs(ret);
4929 -static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
4930 +static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
4936 - err = call_neg_helper(skb, offset, &ret, 4);
4938 - err = skb_copy_bits(skb, offset, &ret, 4);
4939 + err = skb_copy_bits(skb, offset, &ret, 4);
4941 return (u64)err << 32 | ntohl(ret);
4943 @@ -199,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4946 /* We are guaranteed to have aligned memory. */
4947 + pax_open_kernel();
4948 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4949 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4950 + pax_close_kernel();
4953 static void build_prologue(struct jit_ctx *ctx)
4954 @@ -556,6 +543,9 @@ static int build_body(struct jit_ctx *ctx)
4955 case BPF_LD | BPF_B | BPF_ABS:
4958 + /* the interpreter will deal with the negative K */
4961 emit_mov_i(r_off, k, ctx);
4963 ctx->seen |= SEEN_DATA | SEEN_CALL;
4964 @@ -570,18 +560,6 @@ load_common:
4965 condt = ARM_COND_HI;
4969 - * test for negative offset, only if we are
4970 - * currently scheduled to take the fast
4971 - * path. this will update the flags so that
4972 - * the slowpath instruction are ignored if the
4973 - * offset is negative.
4975 - * for loard_order == 0 the HI condition will
4976 - * make loads at offset 0 take the slow path too.
4978 - _emit(condt, ARM_CMP_I(r_off, 0), ctx);
4980 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
4983 diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4984 index 5b217f4..c23f40e 100644
4985 --- a/arch/arm/plat-iop/setup.c
4986 +++ b/arch/arm/plat-iop/setup.c
4987 @@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4988 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4989 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4990 .length = IOP3XX_PERIPHERAL_SIZE,
4991 - .type = MT_UNCACHED,
4992 + .type = MT_UNCACHED_RW,
4996 diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4997 index a5bc92d..0bb4730 100644
4998 --- a/arch/arm/plat-omap/sram.c
4999 +++ b/arch/arm/plat-omap/sram.c
5000 @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
5001 * Looks like we need to preserve some bootloader code at the
5002 * beginning of SRAM for jumping to flash for reboot to work...
5004 + pax_open_kernel();
5005 memset_io(omap_sram_base + omap_sram_skip, 0,
5006 omap_sram_size - omap_sram_skip);
5007 + pax_close_kernel();
5009 diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
5010 index d6285ef..b684dac 100644
5011 --- a/arch/arm64/Kconfig.debug
5012 +++ b/arch/arm64/Kconfig.debug
5013 @@ -10,6 +10,7 @@ config ARM64_PTDUMP
5014 bool "Export kernel pagetable layout to userspace via debugfs"
5015 depends on DEBUG_KERNEL
5017 + depends on !GRKERNSEC_KMEM
5019 Say Y here if you want to show the kernel pagetable layout in a
5020 debugfs file. This information is only useful for kernel developers
5021 diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
5022 index 7047051..44e8675 100644
5023 --- a/arch/arm64/include/asm/atomic.h
5024 +++ b/arch/arm64/include/asm/atomic.h
5025 @@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
5026 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
5027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
5029 +#define atomic64_read_unchecked(v) atomic64_read(v)
5030 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5031 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5032 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5033 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5034 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5035 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5036 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5037 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5041 diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
5042 index 0fa47c4..b167938 100644
5043 --- a/arch/arm64/include/asm/barrier.h
5044 +++ b/arch/arm64/include/asm/barrier.h
5047 compiletime_assert_atomic_type(*p); \
5049 - ACCESS_ONCE(*p) = (v); \
5050 + ACCESS_ONCE_RW(*p) = (v); \
5053 #define smp_load_acquire(p) \
5054 diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
5055 index 4fde8c1..441f84f 100644
5056 --- a/arch/arm64/include/asm/percpu.h
5057 +++ b/arch/arm64/include/asm/percpu.h
5058 @@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
5062 - ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
5063 + ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
5066 - ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
5067 + ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
5070 - ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
5071 + ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
5074 - ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
5075 + ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
5079 diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
5080 index 7642056..bffc904 100644
5081 --- a/arch/arm64/include/asm/pgalloc.h
5082 +++ b/arch/arm64/include/asm/pgalloc.h
5083 @@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5084 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
5087 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5089 + pud_populate(mm, pud, pmd);
5092 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
5094 #if CONFIG_PGTABLE_LEVELS > 3
5095 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
5096 index 07e1ba44..ec8cbbb 100644
5097 --- a/arch/arm64/include/asm/uaccess.h
5098 +++ b/arch/arm64/include/asm/uaccess.h
5099 @@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
5103 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5104 #define access_ok(type, addr, size) __range_ok(addr, size)
5105 #define user_addr_max get_fs
5107 diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
5108 index d16a1ce..a5acc60 100644
5109 --- a/arch/arm64/mm/dma-mapping.c
5110 +++ b/arch/arm64/mm/dma-mapping.c
5111 @@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
5112 phys_to_page(paddr),
5113 size >> PAGE_SHIFT);
5115 - swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5116 + swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5119 static void *__dma_alloc(struct device *dev, size_t size,
5120 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
5121 index c3a58a1..78fbf54 100644
5122 --- a/arch/avr32/include/asm/cache.h
5123 +++ b/arch/avr32/include/asm/cache.h
5125 #ifndef __ASM_AVR32_CACHE_H
5126 #define __ASM_AVR32_CACHE_H
5128 +#include <linux/const.h>
5130 #define L1_CACHE_SHIFT 5
5131 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5132 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5135 * Memory returned by kmalloc() may be used for DMA, so we must make
5136 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5137 index 0388ece..87c8df1 100644
5138 --- a/arch/avr32/include/asm/elf.h
5139 +++ b/arch/avr32/include/asm/elf.h
5140 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5141 the loader. We need to make sure that it is out of the way of the program
5142 that it will "exec", and that there is sufficient room for the brk. */
5144 -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5145 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5147 +#ifdef CONFIG_PAX_ASLR
5148 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5150 +#define PAX_DELTA_MMAP_LEN 15
5151 +#define PAX_DELTA_STACK_LEN 15
5154 /* This yields a mask that user programs can use to figure out what
5155 instruction set this CPU supports. This could be done in user space,
5156 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5157 index 479330b..53717a8 100644
5158 --- a/arch/avr32/include/asm/kmap_types.h
5159 +++ b/arch/avr32/include/asm/kmap_types.h
5161 #define __ASM_AVR32_KMAP_TYPES_H
5163 #ifdef CONFIG_DEBUG_HIGHMEM
5164 -# define KM_TYPE_NR 29
5165 +# define KM_TYPE_NR 30
5167 -# define KM_TYPE_NR 14
5168 +# define KM_TYPE_NR 15
5171 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5172 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5173 index c035339..e1fa594 100644
5174 --- a/arch/avr32/mm/fault.c
5175 +++ b/arch/avr32/mm/fault.c
5176 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5178 int exception_trace = 1;
5180 +#ifdef CONFIG_PAX_PAGEEXEC
5181 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5185 + printk(KERN_ERR "PAX: bytes at PC: ");
5186 + for (i = 0; i < 20; i++) {
5188 + if (get_user(c, (unsigned char *)pc+i))
5189 + printk(KERN_CONT "???????? ");
5191 + printk(KERN_CONT "%02x ", c);
5198 * This routine handles page faults. It determines the address and the
5199 * problem, and then passes it off to one of the appropriate routines.
5200 @@ -178,6 +195,16 @@ bad_area:
5201 up_read(&mm->mmap_sem);
5203 if (user_mode(regs)) {
5205 +#ifdef CONFIG_PAX_PAGEEXEC
5206 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5207 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5208 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5209 + do_group_exit(SIGKILL);
5214 if (exception_trace && printk_ratelimit())
5215 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5216 "sp %08lx ecr %lu\n",
5217 diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
5218 index f3337ee..15b6f8d 100644
5219 --- a/arch/blackfin/Kconfig.debug
5220 +++ b/arch/blackfin/Kconfig.debug
5221 @@ -18,6 +18,7 @@ config DEBUG_VERBOSE
5223 tristate "Generate Blackfin MMR tree"
5225 + depends on !GRKERNSEC_KMEM
5227 Create a tree of Blackfin MMRs via the debugfs tree. If
5228 you enable this, you will find all MMRs laid out in the
5229 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5230 index 568885a..f8008df 100644
5231 --- a/arch/blackfin/include/asm/cache.h
5232 +++ b/arch/blackfin/include/asm/cache.h
5234 #ifndef __ARCH_BLACKFIN_CACHE_H
5235 #define __ARCH_BLACKFIN_CACHE_H
5237 +#include <linux/const.h>
5238 #include <linux/linkage.h> /* for asmlinkage */
5242 * Blackfin loads 32 bytes for cache
5244 #define L1_CACHE_SHIFT 5
5245 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5246 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5247 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5249 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5250 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5251 index aea2718..3639a60 100644
5252 --- a/arch/cris/include/arch-v10/arch/cache.h
5253 +++ b/arch/cris/include/arch-v10/arch/cache.h
5255 #ifndef _ASM_ARCH_CACHE_H
5256 #define _ASM_ARCH_CACHE_H
5258 +#include <linux/const.h>
5259 /* Etrax 100LX have 32-byte cache-lines. */
5260 -#define L1_CACHE_BYTES 32
5261 #define L1_CACHE_SHIFT 5
5262 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5264 #endif /* _ASM_ARCH_CACHE_H */
5265 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5266 index 7caf25d..ee65ac5 100644
5267 --- a/arch/cris/include/arch-v32/arch/cache.h
5268 +++ b/arch/cris/include/arch-v32/arch/cache.h
5270 #ifndef _ASM_CRIS_ARCH_CACHE_H
5271 #define _ASM_CRIS_ARCH_CACHE_H
5273 +#include <linux/const.h>
5274 #include <arch/hwregs/dma.h>
5276 /* A cache-line is 32 bytes. */
5277 -#define L1_CACHE_BYTES 32
5278 #define L1_CACHE_SHIFT 5
5279 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5281 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5283 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5284 index 102190a..5334cea 100644
5285 --- a/arch/frv/include/asm/atomic.h
5286 +++ b/arch/frv/include/asm/atomic.h
5287 @@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5288 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5289 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5291 +#define atomic64_read_unchecked(v) atomic64_read(v)
5292 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5293 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5294 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5295 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5296 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5297 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5298 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5299 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5301 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5304 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5305 index 2797163..c2a401df9 100644
5306 --- a/arch/frv/include/asm/cache.h
5307 +++ b/arch/frv/include/asm/cache.h
5309 #ifndef __ASM_CACHE_H
5310 #define __ASM_CACHE_H
5312 +#include <linux/const.h>
5314 /* bytes per L1 cache line */
5315 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5316 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5317 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5319 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5320 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5321 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5322 index 43901f2..0d8b865 100644
5323 --- a/arch/frv/include/asm/kmap_types.h
5324 +++ b/arch/frv/include/asm/kmap_types.h
5326 #ifndef _ASM_KMAP_TYPES_H
5327 #define _ASM_KMAP_TYPES_H
5329 -#define KM_TYPE_NR 17
5330 +#define KM_TYPE_NR 18
5333 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5334 index 836f147..4cf23f5 100644
5335 --- a/arch/frv/mm/elf-fdpic.c
5336 +++ b/arch/frv/mm/elf-fdpic.c
5337 @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5339 struct vm_area_struct *vma;
5340 struct vm_unmapped_area_info info;
5341 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5343 if (len > TASK_SIZE)
5345 @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5347 addr = PAGE_ALIGN(addr);
5348 vma = find_vma(current->mm, addr);
5349 - if (TASK_SIZE - len >= addr &&
5350 - (!vma || addr + len <= vma->vm_start))
5351 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5355 @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5356 info.high_limit = (current->mm->start_stack - 0x00200000);
5357 info.align_mask = 0;
5358 info.align_offset = 0;
5359 + info.threadstack_offset = offset;
5360 addr = vm_unmapped_area(&info);
5361 if (!(addr & ~PAGE_MASK))
5363 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5364 index 69952c18..4fa2908 100644
5365 --- a/arch/hexagon/include/asm/cache.h
5366 +++ b/arch/hexagon/include/asm/cache.h
5368 #ifndef __ASM_CACHE_H
5369 #define __ASM_CACHE_H
5371 +#include <linux/const.h>
5373 /* Bytes per L1 cache line */
5374 -#define L1_CACHE_SHIFT (5)
5375 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5376 +#define L1_CACHE_SHIFT 5
5377 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5379 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5381 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5382 index 42a91a7..29d446e 100644
5383 --- a/arch/ia64/Kconfig
5384 +++ b/arch/ia64/Kconfig
5385 @@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
5387 bool "kexec system call"
5388 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5389 + depends on !GRKERNSEC_KMEM
5391 kexec is a system call that implements the ability to shutdown your
5392 current kernel, and to start another kernel. It is like a reboot
5393 diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5394 index 970d0bd..e750b9b 100644
5395 --- a/arch/ia64/Makefile
5396 +++ b/arch/ia64/Makefile
5397 @@ -98,5 +98,6 @@ endef
5398 archprepare: make_nr_irqs_h FORCE
5399 PHONY += make_nr_irqs_h FORCE
5401 +make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5402 make_nr_irqs_h: FORCE
5403 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5404 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5405 index 0bf0350..2ad1957 100644
5406 --- a/arch/ia64/include/asm/atomic.h
5407 +++ b/arch/ia64/include/asm/atomic.h
5408 @@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5409 #define atomic64_inc(v) atomic64_add(1, (v))
5410 #define atomic64_dec(v) atomic64_sub(1, (v))
5412 +#define atomic64_read_unchecked(v) atomic64_read(v)
5413 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5414 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5415 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5416 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5417 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5418 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5419 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5420 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5422 #endif /* _ASM_IA64_ATOMIC_H */
5423 diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5424 index 843ba43..fa118fb 100644
5425 --- a/arch/ia64/include/asm/barrier.h
5426 +++ b/arch/ia64/include/asm/barrier.h
5429 compiletime_assert_atomic_type(*p); \
5431 - ACCESS_ONCE(*p) = (v); \
5432 + ACCESS_ONCE_RW(*p) = (v); \
5435 #define smp_load_acquire(p) \
5436 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5437 index 988254a..e1ee885 100644
5438 --- a/arch/ia64/include/asm/cache.h
5439 +++ b/arch/ia64/include/asm/cache.h
5441 #ifndef _ASM_IA64_CACHE_H
5442 #define _ASM_IA64_CACHE_H
5444 +#include <linux/const.h>
5447 * Copyright (C) 1998-2000 Hewlett-Packard Co
5450 /* Bytes per L1 (data) cache line. */
5451 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5452 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5453 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5456 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5457 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5458 index 5a83c5c..4d7f553 100644
5459 --- a/arch/ia64/include/asm/elf.h
5460 +++ b/arch/ia64/include/asm/elf.h
5463 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5465 +#ifdef CONFIG_PAX_ASLR
5466 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5468 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5469 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5472 #define PT_IA_64_UNWIND 0x70000001
5474 /* IA-64 relocations: */
5475 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5476 index f5e70e9..624fad5 100644
5477 --- a/arch/ia64/include/asm/pgalloc.h
5478 +++ b/arch/ia64/include/asm/pgalloc.h
5479 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5480 pgd_val(*pgd_entry) = __pa(pud);
5484 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5486 + pgd_populate(mm, pgd_entry, pud);
5489 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5491 return quicklist_alloc(0, GFP_KERNEL, NULL);
5492 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5493 pud_val(*pud_entry) = __pa(pmd);
5497 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5499 + pud_populate(mm, pud_entry, pmd);
5502 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5504 return quicklist_alloc(0, GFP_KERNEL, NULL);
5505 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5506 index 9f3ed9e..c99b418 100644
5507 --- a/arch/ia64/include/asm/pgtable.h
5508 +++ b/arch/ia64/include/asm/pgtable.h
5510 * David Mosberger-Tang <davidm@hpl.hp.com>
5514 +#include <linux/const.h>
5515 #include <asm/mman.h>
5516 #include <asm/page.h>
5517 #include <asm/processor.h>
5518 @@ -139,6 +139,17 @@
5519 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5520 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5521 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5523 +#ifdef CONFIG_PAX_PAGEEXEC
5524 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5525 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5526 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5528 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5529 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5530 +# define PAGE_COPY_NOEXEC PAGE_COPY
5533 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5534 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5535 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5536 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5537 index 45698cd..e8e2dbc 100644
5538 --- a/arch/ia64/include/asm/spinlock.h
5539 +++ b/arch/ia64/include/asm/spinlock.h
5540 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5541 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5543 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5544 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5545 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5548 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5549 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5550 index 4f3fb6cc..254055e 100644
5551 --- a/arch/ia64/include/asm/uaccess.h
5552 +++ b/arch/ia64/include/asm/uaccess.h
5554 && ((segment).seg == KERNEL_DS.seg \
5555 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5557 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5558 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5561 @@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5562 static inline unsigned long
5563 __copy_to_user (void __user *to, const void *from, unsigned long count)
5565 + if (count > INT_MAX)
5568 + if (!__builtin_constant_p(count))
5569 + check_object_size(from, count, true);
5571 return __copy_user(to, (__force void __user *) from, count);
5574 static inline unsigned long
5575 __copy_from_user (void *to, const void __user *from, unsigned long count)
5577 + if (count > INT_MAX)
5580 + if (!__builtin_constant_p(count))
5581 + check_object_size(to, count, false);
5583 return __copy_user((__force void __user *) to, from, count);
5586 @@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5588 void __user *__cu_to = (to); \
5589 const void *__cu_from = (from); \
5590 - long __cu_len = (n); \
5591 + unsigned long __cu_len = (n); \
5593 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
5594 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5595 + if (!__builtin_constant_p(n)) \
5596 + check_object_size(__cu_from, __cu_len, true); \
5597 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5602 @@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5604 void *__cu_to = (to); \
5605 const void __user *__cu_from = (from); \
5606 - long __cu_len = (n); \
5607 + unsigned long __cu_len = (n); \
5609 __chk_user_ptr(__cu_from); \
5610 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
5611 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5612 + if (!__builtin_constant_p(n)) \
5613 + check_object_size(__cu_to, __cu_len, false); \
5614 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5619 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5620 index b15933c..098b1c8 100644
5621 --- a/arch/ia64/kernel/module.c
5622 +++ b/arch/ia64/kernel/module.c
5623 @@ -484,15 +484,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5627 +in_init_rx (const struct module *mod, uint64_t addr)
5629 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5633 +in_init_rw (const struct module *mod, uint64_t addr)
5635 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5639 in_init (const struct module *mod, uint64_t addr)
5641 - return addr - (uint64_t) mod->module_init < mod->init_size;
5642 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5646 +in_core_rx (const struct module *mod, uint64_t addr)
5648 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5652 +in_core_rw (const struct module *mod, uint64_t addr)
5654 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5658 in_core (const struct module *mod, uint64_t addr)
5660 - return addr - (uint64_t) mod->module_core < mod->core_size;
5661 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5665 @@ -675,7 +699,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5669 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5670 + if (in_init_rx(mod, val))
5671 + val -= (uint64_t) mod->module_init_rx;
5672 + else if (in_init_rw(mod, val))
5673 + val -= (uint64_t) mod->module_init_rw;
5674 + else if (in_core_rx(mod, val))
5675 + val -= (uint64_t) mod->module_core_rx;
5676 + else if (in_core_rw(mod, val))
5677 + val -= (uint64_t) mod->module_core_rw;
5681 @@ -810,15 +841,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5682 * addresses have been selected...
5685 - if (mod->core_size > MAX_LTOFF)
5686 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5688 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5689 * at the end of the module.
5691 - gp = mod->core_size - MAX_LTOFF / 2;
5692 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5694 - gp = mod->core_size / 2;
5695 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5696 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5697 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5699 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5701 diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5702 index c39c3cd..3c77738 100644
5703 --- a/arch/ia64/kernel/palinfo.c
5704 +++ b/arch/ia64/kernel/palinfo.c
5705 @@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5709 -static struct notifier_block __refdata palinfo_cpu_notifier =
5710 +static struct notifier_block palinfo_cpu_notifier =
5712 .notifier_call = palinfo_cpu_callback,
5714 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5715 index 41e33f8..65180b2a 100644
5716 --- a/arch/ia64/kernel/sys_ia64.c
5717 +++ b/arch/ia64/kernel/sys_ia64.c
5718 @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5719 unsigned long align_mask = 0;
5720 struct mm_struct *mm = current->mm;
5721 struct vm_unmapped_area_info info;
5722 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5724 if (len > RGN_MAP_LIMIT)
5726 @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5727 if (REGION_NUMBER(addr) == RGN_HPAGE)
5731 +#ifdef CONFIG_PAX_RANDMMAP
5732 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5733 + addr = mm->free_area_cache;
5738 addr = TASK_UNMAPPED_BASE;
5740 @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5741 info.high_limit = TASK_SIZE;
5742 info.align_mask = align_mask;
5743 info.align_offset = 0;
5744 + info.threadstack_offset = offset;
5745 return vm_unmapped_area(&info);
5748 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5749 index dc506b0..39baade 100644
5750 --- a/arch/ia64/kernel/vmlinux.lds.S
5751 +++ b/arch/ia64/kernel/vmlinux.lds.S
5752 @@ -171,7 +171,7 @@ SECTIONS {
5754 . = ALIGN(PERCPU_PAGE_SIZE);
5755 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5756 - __phys_per_cpu_start = __per_cpu_load;
5757 + __phys_per_cpu_start = per_cpu_load;
5759 * ensure percpu data fits
5760 * into percpu page size
5761 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5762 index 70b40d1..01a9a28 100644
5763 --- a/arch/ia64/mm/fault.c
5764 +++ b/arch/ia64/mm/fault.c
5765 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5766 return pte_present(pte);
5769 +#ifdef CONFIG_PAX_PAGEEXEC
5770 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5774 + printk(KERN_ERR "PAX: bytes at PC: ");
5775 + for (i = 0; i < 8; i++) {
5777 + if (get_user(c, (unsigned int *)pc+i))
5778 + printk(KERN_CONT "???????? ");
5780 + printk(KERN_CONT "%08x ", c);
5786 # define VM_READ_BIT 0
5787 # define VM_WRITE_BIT 1
5788 # define VM_EXEC_BIT 2
5789 @@ -151,8 +168,21 @@ retry:
5790 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5793 - if ((vma->vm_flags & mask) != mask)
5794 + if ((vma->vm_flags & mask) != mask) {
5796 +#ifdef CONFIG_PAX_PAGEEXEC
5797 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5798 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5801 + up_read(&mm->mmap_sem);
5802 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5803 + do_group_exit(SIGKILL);
5811 * If for any reason at all we couldn't handle the fault, make
5812 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5813 index f50d4b3..c7975ee 100644
5814 --- a/arch/ia64/mm/hugetlbpage.c
5815 +++ b/arch/ia64/mm/hugetlbpage.c
5816 @@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5817 unsigned long pgoff, unsigned long flags)
5819 struct vm_unmapped_area_info info;
5820 + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5822 if (len > RGN_MAP_LIMIT)
5824 @@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5825 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5826 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5827 info.align_offset = 0;
5828 + info.threadstack_offset = offset;
5829 return vm_unmapped_area(&info);
5832 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5833 index 97e48b0..fc59c36 100644
5834 --- a/arch/ia64/mm/init.c
5835 +++ b/arch/ia64/mm/init.c
5836 @@ -119,6 +119,19 @@ ia64_init_addr_space (void)
5837 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5838 vma->vm_end = vma->vm_start + PAGE_SIZE;
5839 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5841 +#ifdef CONFIG_PAX_PAGEEXEC
5842 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5843 + vma->vm_flags &= ~VM_EXEC;
5845 +#ifdef CONFIG_PAX_MPROTECT
5846 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
5847 + vma->vm_flags &= ~VM_MAYEXEC;
5853 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5854 down_write(&current->mm->mmap_sem);
5855 if (insert_vm_struct(current->mm, vma)) {
5856 @@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
5857 gate_vma.vm_start = FIXADDR_USER_START;
5858 gate_vma.vm_end = FIXADDR_USER_END;
5859 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5860 - gate_vma.vm_page_prot = __P101;
5861 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5865 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5866 index 40b3ee98..8c2c112 100644
5867 --- a/arch/m32r/include/asm/cache.h
5868 +++ b/arch/m32r/include/asm/cache.h
5870 #ifndef _ASM_M32R_CACHE_H
5871 #define _ASM_M32R_CACHE_H
5873 +#include <linux/const.h>
5875 /* L1 cache line size */
5876 #define L1_CACHE_SHIFT 4
5877 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5878 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5880 #endif /* _ASM_M32R_CACHE_H */
5881 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5882 index 82abd15..d95ae5d 100644
5883 --- a/arch/m32r/lib/usercopy.c
5884 +++ b/arch/m32r/lib/usercopy.c
5887 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5893 if (access_ok(VERIFY_WRITE, to, n))
5894 __copy_user(to,from,n);
5895 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5897 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5903 if (access_ok(VERIFY_READ, from, n))
5904 __copy_user_zeroing(to,from,n);
5905 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5906 index 0395c51..5f26031 100644
5907 --- a/arch/m68k/include/asm/cache.h
5908 +++ b/arch/m68k/include/asm/cache.h
5910 #ifndef __ARCH_M68K_CACHE_H
5911 #define __ARCH_M68K_CACHE_H
5913 +#include <linux/const.h>
5915 /* bytes per L1 cache line */
5916 #define L1_CACHE_SHIFT 4
5917 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5918 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5920 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5922 diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5923 index 5a696e5..070490d 100644
5924 --- a/arch/metag/include/asm/barrier.h
5925 +++ b/arch/metag/include/asm/barrier.h
5926 @@ -90,7 +90,7 @@ static inline void fence(void)
5928 compiletime_assert_atomic_type(*p); \
5930 - ACCESS_ONCE(*p) = (v); \
5931 + ACCESS_ONCE_RW(*p) = (v); \
5934 #define smp_load_acquire(p) \
5935 diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5936 index 53f0f6c..2dc07fd 100644
5937 --- a/arch/metag/mm/hugetlbpage.c
5938 +++ b/arch/metag/mm/hugetlbpage.c
5939 @@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5940 info.high_limit = TASK_SIZE;
5941 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5942 info.align_offset = 0;
5943 + info.threadstack_offset = 0;
5944 return vm_unmapped_area(&info);
5947 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5948 index 4efe96a..60e8699 100644
5949 --- a/arch/microblaze/include/asm/cache.h
5950 +++ b/arch/microblaze/include/asm/cache.h
5952 #ifndef _ASM_MICROBLAZE_CACHE_H
5953 #define _ASM_MICROBLAZE_CACHE_H
5955 +#include <linux/const.h>
5956 #include <asm/registers.h>
5958 #define L1_CACHE_SHIFT 5
5959 /* word-granular cache in microblaze */
5960 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5961 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5963 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5965 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5966 index 199a835..822b487 100644
5967 --- a/arch/mips/Kconfig
5968 +++ b/arch/mips/Kconfig
5969 @@ -2591,6 +2591,7 @@ source "kernel/Kconfig.preempt"
5972 bool "Kexec system call"
5973 + depends on !GRKERNSEC_KMEM
5975 kexec is a system call that implements the ability to shutdown your
5976 current kernel, and to start another kernel. It is like a reboot
5977 diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5978 index d8960d4..77dbd31 100644
5979 --- a/arch/mips/cavium-octeon/dma-octeon.c
5980 +++ b/arch/mips/cavium-octeon/dma-octeon.c
5981 @@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5982 if (dma_release_from_coherent(dev, order, vaddr))
5985 - swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5986 + swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5989 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5990 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5991 index 26d4363..3c9a82e 100644
5992 --- a/arch/mips/include/asm/atomic.h
5993 +++ b/arch/mips/include/asm/atomic.h
5995 #include <asm/cmpxchg.h>
5996 #include <asm/war.h>
5998 +#ifdef CONFIG_GENERIC_ATOMIC64
5999 +#include <asm-generic/atomic64.h>
6002 #define ATOMIC_INIT(i) { (i) }
6004 +#ifdef CONFIG_64BIT
6005 +#define _ASM_EXTABLE(from, to) \
6006 +" .section __ex_table,\"a\"\n" \
6007 +" .dword " #from ", " #to"\n" \
6010 +#define _ASM_EXTABLE(from, to) \
6011 +" .section __ex_table,\"a\"\n" \
6012 +" .word " #from ", " #to"\n" \
6017 * atomic_read - read atomic variable
6018 * @v: pointer of type atomic_t
6020 * Atomically reads the value of @v.
6022 -#define atomic_read(v) ACCESS_ONCE((v)->counter)
6023 +static inline int atomic_read(const atomic_t *v)
6025 + return ACCESS_ONCE(v->counter);
6028 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6030 + return ACCESS_ONCE(v->counter);
6034 * atomic_set - set atomic variable
6037 * Atomically sets the value of @v to @i.
6039 -#define atomic_set(v, i) ((v)->counter = (i))
6040 +static inline void atomic_set(atomic_t *v, int i)
6045 -#define ATOMIC_OP(op, c_op, asm_op) \
6046 -static __inline__ void atomic_##op(int i, atomic_t * v) \
6047 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6052 +#ifdef CONFIG_PAX_REFCOUNT
6053 +#define __OVERFLOW_POST \
6055 + " .set noreorder \n" \
6057 + " move %0, %1 \n" \
6058 + " .set reorder \n"
6059 +#define __OVERFLOW_EXTABLE \
6061 + _ASM_EXTABLE(2b, 3b)
6063 +#define __OVERFLOW_POST
6064 +#define __OVERFLOW_EXTABLE
6067 +#define __ATOMIC_OP(op, suffix, asm_op, extable) \
6068 +static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
6070 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6073 __asm__ __volatile__( \
6074 - " .set arch=r4000 \n" \
6075 - "1: ll %0, %1 # atomic_" #op " \n" \
6076 - " " #asm_op " %0, %2 \n" \
6077 + " .set mips3 \n" \
6078 + "1: ll %0, %1 # atomic_" #op #suffix "\n" \
6079 + "2: " #asm_op " %0, %2 \n" \
6081 " beqzl %0, 1b \n" \
6084 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6086 } else if (kernel_uses_llsc) { \
6090 - __asm__ __volatile__( \
6091 - " .set "MIPS_ISA_LEVEL" \n" \
6092 - " ll %0, %1 # atomic_" #op "\n" \
6093 - " " #asm_op " %0, %2 \n" \
6095 - " .set mips0 \n" \
6096 - : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6098 - } while (unlikely(!temp)); \
6099 + __asm__ __volatile__( \
6100 + " .set "MIPS_ISA_LEVEL" \n" \
6101 + "1: ll %0, %1 # atomic_" #op #suffix "\n" \
6102 + "2: " #asm_op " %0, %2 \n" \
6104 + " beqz %0, 1b \n" \
6106 + " .set mips0 \n" \
6107 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6110 unsigned long flags; \
6112 raw_local_irq_save(flags); \
6113 - v->counter c_op i; \
6114 + __asm__ __volatile__( \
6115 + "2: " #asm_op " %0, %1 \n" \
6117 + : "+r" (v->counter) : "Ir" (i)); \
6118 raw_local_irq_restore(flags); \
6122 -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
6123 -static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6124 +#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
6125 + __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6127 +#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6128 +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
6132 @@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6135 __asm__ __volatile__( \
6136 - " .set arch=r4000 \n" \
6137 - "1: ll %1, %2 # atomic_" #op "_return \n" \
6138 - " " #asm_op " %0, %1, %3 \n" \
6139 + " .set mips3 \n" \
6140 + "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
6141 + "2: " #asm_op " %0, %1, %3 \n" \
6143 " beqzl %0, 1b \n" \
6144 - " " #asm_op " %0, %1, %3 \n" \
6147 + "4: " #asm_op " %0, %1, %3 \n" \
6150 : "=&r" (result), "=&r" (temp), \
6151 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6152 @@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6153 } else if (kernel_uses_llsc) { \
6157 - __asm__ __volatile__( \
6158 - " .set "MIPS_ISA_LEVEL" \n" \
6159 - " ll %1, %2 # atomic_" #op "_return \n" \
6160 - " " #asm_op " %0, %1, %3 \n" \
6162 - " .set mips0 \n" \
6163 - : "=&r" (result), "=&r" (temp), \
6164 - "+" GCC_OFF_SMALL_ASM() (v->counter) \
6166 - } while (unlikely(!result)); \
6167 + __asm__ __volatile__( \
6168 + " .set "MIPS_ISA_LEVEL" \n" \
6169 + "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
6170 + "2: " #asm_op " %0, %1, %3 \n" \
6174 + "4: " #asm_op " %0, %1, %3 \n" \
6176 + " .set mips0 \n" \
6177 + : "=&r" (result), "=&r" (temp), \
6178 + "+" GCC_OFF_SMALL_ASM() (v->counter) \
6181 result = temp; result c_op i; \
6183 unsigned long flags; \
6185 raw_local_irq_save(flags); \
6186 - result = v->counter; \
6188 - v->counter = result; \
6189 + __asm__ __volatile__( \
6191 + "2: " #asm_op " %0, %1, %2 \n" \
6195 + : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6197 raw_local_irq_restore(flags); \
6200 @@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6204 -#define ATOMIC_OPS(op, c_op, asm_op) \
6205 - ATOMIC_OP(op, c_op, asm_op) \
6206 - ATOMIC_OP_RETURN(op, c_op, asm_op)
6207 +#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6208 + __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6210 -ATOMIC_OPS(add, +=, addu)
6211 -ATOMIC_OPS(sub, -=, subu)
6212 +#define ATOMIC_OPS(op, asm_op) \
6213 + ATOMIC_OP(op, asm_op) \
6214 + ATOMIC_OP_RETURN(op, asm_op)
6216 +ATOMIC_OPS(add, add)
6217 +ATOMIC_OPS(sub, sub)
6220 #undef ATOMIC_OP_RETURN
6221 +#undef __ATOMIC_OP_RETURN
6226 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
6227 @@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
6228 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6229 * The function returns the old value of @v minus @i.
6231 -static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6232 +static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6236 @@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6239 __asm__ __volatile__(
6240 - " .set arch=r4000 \n"
6241 + " .set "MIPS_ISA_LEVEL" \n"
6242 "1: ll %1, %2 # atomic_sub_if_positive\n"
6243 " subu %0, %1, %3 \n"
6245 @@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6249 -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6250 -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6251 +static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6253 + return cmpxchg(&v->counter, old, new);
6256 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6259 + return cmpxchg(&(v->counter), old, new);
6262 +static inline int atomic_xchg(atomic_t *v, int new)
6264 + return xchg(&v->counter, new);
6267 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6269 + return xchg(&(v->counter), new);
6273 * __atomic_add_unless - add unless the number is a given value
6274 @@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6276 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6277 #define atomic_inc_return(v) atomic_add_return(1, (v))
6278 +static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6280 + return atomic_add_return_unchecked(1, v);
6284 * atomic_sub_and_test - subtract value from variable and test result
6285 @@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6288 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6289 +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6291 + return atomic_add_return_unchecked(1, v) == 0;
6295 * atomic_dec_and_test - decrement by 1 and test
6296 @@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6297 * Atomically increments @v by 1.
6299 #define atomic_inc(v) atomic_add(1, (v))
6300 +static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6302 + atomic_add_unchecked(1, v);
6306 * atomic_dec - decrement and test
6307 @@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6308 * Atomically decrements @v by 1.
6310 #define atomic_dec(v) atomic_sub(1, (v))
6311 +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6313 + atomic_sub_unchecked(1, v);
6317 * atomic_add_negative - add and test if negative
6318 @@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6319 * @v: pointer of type atomic64_t
6322 -#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6323 +static inline long atomic64_read(const atomic64_t *v)
6325 + return ACCESS_ONCE(v->counter);
6328 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6330 + return ACCESS_ONCE(v->counter);
6334 * atomic64_set - set atomic variable
6335 * @v: pointer of type atomic64_t
6336 * @i: required value
6338 -#define atomic64_set(v, i) ((v)->counter = (i))
6339 +static inline void atomic64_set(atomic64_t *v, long i)
6344 -#define ATOMIC64_OP(op, c_op, asm_op) \
6345 -static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6346 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6351 +#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6352 +static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6354 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6357 __asm__ __volatile__( \
6358 - " .set arch=r4000 \n" \
6359 - "1: lld %0, %1 # atomic64_" #op " \n" \
6360 - " " #asm_op " %0, %2 \n" \
6361 + " .set "MIPS_ISA_LEVEL" \n" \
6362 + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6363 + "2: " #asm_op " %0, %2 \n" \
6365 " beqzl %0, 1b \n" \
6368 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6370 } else if (kernel_uses_llsc) { \
6374 - __asm__ __volatile__( \
6375 - " .set "MIPS_ISA_LEVEL" \n" \
6376 - " lld %0, %1 # atomic64_" #op "\n" \
6377 - " " #asm_op " %0, %2 \n" \
6378 - " scd %0, %1 \n" \
6379 - " .set mips0 \n" \
6380 - : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6382 - } while (unlikely(!temp)); \
6383 + __asm__ __volatile__( \
6384 + " .set "MIPS_ISA_LEVEL" \n" \
6385 + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6386 + "2: " #asm_op " %0, %2 \n" \
6387 + " scd %0, %1 \n" \
6388 + " beqz %0, 1b \n" \
6390 + " .set mips0 \n" \
6391 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6394 unsigned long flags; \
6396 raw_local_irq_save(flags); \
6397 - v->counter c_op i; \
6398 + __asm__ __volatile__( \
6399 + "2: " #asm_op " %0, %1 \n" \
6401 + : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6402 raw_local_irq_restore(flags); \
6406 -#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6407 -static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6408 +#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6409 + __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6411 +#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6412 +static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6416 @@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6419 __asm__ __volatile__( \
6420 - " .set arch=r4000 \n" \
6421 + " .set mips3 \n" \
6422 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6423 - " " #asm_op " %0, %1, %3 \n" \
6424 + "2: " #asm_op " %0, %1, %3 \n" \
6426 " beqzl %0, 1b \n" \
6427 - " " #asm_op " %0, %1, %3 \n" \
6430 + "4: " #asm_op " %0, %1, %3 \n" \
6433 : "=&r" (result), "=&r" (temp), \
6434 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6435 @@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6436 } else if (kernel_uses_llsc) { \
6440 - __asm__ __volatile__( \
6441 - " .set "MIPS_ISA_LEVEL" \n" \
6442 - " lld %1, %2 # atomic64_" #op "_return\n" \
6443 - " " #asm_op " %0, %1, %3 \n" \
6444 - " scd %0, %2 \n" \
6445 - " .set mips0 \n" \
6446 - : "=&r" (result), "=&r" (temp), \
6447 - "=" GCC_OFF_SMALL_ASM() (v->counter) \
6448 - : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6450 - } while (unlikely(!result)); \
6451 + __asm__ __volatile__( \
6452 + " .set "MIPS_ISA_LEVEL" \n" \
6453 + "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6454 + "2: " #asm_op " %0, %1, %3 \n" \
6455 + " scd %0, %2 \n" \
6456 + " beqz %0, 1b \n" \
6459 + "4: " #asm_op " %0, %1, %3 \n" \
6461 + " .set mips0 \n" \
6462 + : "=&r" (result), "=&r" (temp), \
6463 + "=" GCC_OFF_SMALL_ASM() (v->counter) \
6464 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6467 result = temp; result c_op i; \
6469 unsigned long flags; \
6471 raw_local_irq_save(flags); \
6472 - result = v->counter; \
6474 - v->counter = result; \
6475 + __asm__ __volatile__( \
6477 + "2: " #asm_op " %0, %1, %2 \n" \
6481 + : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6483 raw_local_irq_restore(flags); \
6486 @@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6490 -#define ATOMIC64_OPS(op, c_op, asm_op) \
6491 - ATOMIC64_OP(op, c_op, asm_op) \
6492 - ATOMIC64_OP_RETURN(op, c_op, asm_op)
6493 +#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6494 + __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6496 -ATOMIC64_OPS(add, +=, daddu)
6497 -ATOMIC64_OPS(sub, -=, dsubu)
6498 +#define ATOMIC64_OPS(op, asm_op) \
6499 + ATOMIC64_OP(op, asm_op) \
6500 + ATOMIC64_OP_RETURN(op, asm_op)
6502 +ATOMIC64_OPS(add, dadd)
6503 +ATOMIC64_OPS(sub, dsub)
6506 #undef ATOMIC64_OP_RETURN
6507 +#undef __ATOMIC64_OP_RETURN
6509 +#undef __ATOMIC64_OP
6510 +#undef __OVERFLOW_EXTABLE
6511 +#undef __OVERFLOW_POST
6514 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6515 @@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6516 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6517 * The function returns the old value of @v minus @i.
6519 -static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6520 +static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6524 @@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6527 __asm__ __volatile__(
6528 - " .set arch=r4000 \n"
6529 + " .set "MIPS_ISA_LEVEL" \n"
6530 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6531 " dsubu %0, %1, %3 \n"
6533 @@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6537 -#define atomic64_cmpxchg(v, o, n) \
6538 - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6539 -#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6540 +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6542 + return cmpxchg(&v->counter, old, new);
6545 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6548 + return cmpxchg(&(v->counter), old, new);
6551 +static inline long atomic64_xchg(atomic64_t *v, long new)
6553 + return xchg(&v->counter, new);
6556 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6558 + return xchg(&(v->counter), new);
6562 * atomic64_add_unless - add unless the number is a given value
6563 @@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6565 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6566 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6567 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6570 * atomic64_sub_and_test - subtract value from variable and test result
6571 @@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6574 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6575 +#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6578 * atomic64_dec_and_test - decrement by 1 and test
6579 @@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6580 * Atomically increments @v by 1.
6582 #define atomic64_inc(v) atomic64_add(1, (v))
6583 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6586 * atomic64_dec - decrement and test
6587 @@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6588 * Atomically decrements @v by 1.
6590 #define atomic64_dec(v) atomic64_sub(1, (v))
6591 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6594 * atomic64_add_negative - add and test if negative
6595 diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6596 index 7ecba84..21774af 100644
6597 --- a/arch/mips/include/asm/barrier.h
6598 +++ b/arch/mips/include/asm/barrier.h
6601 compiletime_assert_atomic_type(*p); \
6603 - ACCESS_ONCE(*p) = (v); \
6604 + ACCESS_ONCE_RW(*p) = (v); \
6607 #define smp_load_acquire(p) \
6608 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6609 index b4db69f..8f3b093 100644
6610 --- a/arch/mips/include/asm/cache.h
6611 +++ b/arch/mips/include/asm/cache.h
6613 #ifndef _ASM_CACHE_H
6614 #define _ASM_CACHE_H
6616 +#include <linux/const.h>
6617 #include <kmalloc.h>
6619 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6620 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6621 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6623 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6624 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6625 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6626 index f19e890..a4f8177 100644
6627 --- a/arch/mips/include/asm/elf.h
6628 +++ b/arch/mips/include/asm/elf.h
6629 @@ -417,6 +417,13 @@ extern const char *__elf_platform;
6630 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6633 +#ifdef CONFIG_PAX_ASLR
6634 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6636 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6637 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6640 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6641 struct linux_binprm;
6642 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6643 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6644 index c1f6afa..38cc6e9 100644
6645 --- a/arch/mips/include/asm/exec.h
6646 +++ b/arch/mips/include/asm/exec.h
6651 -extern unsigned long arch_align_stack(unsigned long sp);
6652 +#define arch_align_stack(x) ((x) & ~0xfUL)
6654 #endif /* _ASM_EXEC_H */
6655 diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6656 index 9e8ef59..1139d6b 100644
6657 --- a/arch/mips/include/asm/hw_irq.h
6658 +++ b/arch/mips/include/asm/hw_irq.h
6661 #include <linux/atomic.h>
6663 -extern atomic_t irq_err_count;
6664 +extern atomic_unchecked_t irq_err_count;
6667 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6668 diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6669 index 8feaed6..1bd8a64 100644
6670 --- a/arch/mips/include/asm/local.h
6671 +++ b/arch/mips/include/asm/local.h
6672 @@ -13,15 +13,25 @@ typedef struct
6677 + atomic_long_unchecked_t a;
6678 +} local_unchecked_t;
6680 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6682 #define local_read(l) atomic_long_read(&(l)->a)
6683 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6684 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6685 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6687 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6688 +#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6689 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6690 +#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6691 #define local_inc(l) atomic_long_inc(&(l)->a)
6692 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6693 #define local_dec(l) atomic_long_dec(&(l)->a)
6694 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6697 * Same as above, but return the result value
6698 @@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6702 +static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6704 + unsigned long result;
6706 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6707 + unsigned long temp;
6709 + __asm__ __volatile__(
6711 + "1:" __LL "%1, %2 # local_add_return \n"
6712 + " addu %0, %1, %3 \n"
6714 + " beqzl %0, 1b \n"
6715 + " addu %0, %1, %3 \n"
6717 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6718 + : "Ir" (i), "m" (l->a.counter)
6720 + } else if (kernel_uses_llsc) {
6721 + unsigned long temp;
6723 + __asm__ __volatile__(
6725 + "1:" __LL "%1, %2 # local_add_return \n"
6726 + " addu %0, %1, %3 \n"
6729 + " addu %0, %1, %3 \n"
6731 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6732 + : "Ir" (i), "m" (l->a.counter)
6735 + unsigned long flags;
6737 + local_irq_save(flags);
6738 + result = l->a.counter;
6740 + l->a.counter = result;
6741 + local_irq_restore(flags);
6747 static __inline__ long local_sub_return(long i, local_t * l)
6749 unsigned long result;
6750 @@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6752 #define local_cmpxchg(l, o, n) \
6753 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6754 +#define local_cmpxchg_unchecked(l, o, n) \
6755 + ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6756 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6759 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6760 index 89dd7fe..a123c97 100644
6761 --- a/arch/mips/include/asm/page.h
6762 +++ b/arch/mips/include/asm/page.h
6763 @@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6764 #ifdef CONFIG_CPU_MIPS32
6765 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6766 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6767 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6768 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6770 typedef struct { unsigned long long pte; } pte_t;
6771 #define pte_val(x) ((x).pte)
6772 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6773 index b336037..5b874cc 100644
6774 --- a/arch/mips/include/asm/pgalloc.h
6775 +++ b/arch/mips/include/asm/pgalloc.h
6776 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6778 set_pud(pud, __pud((unsigned long)pmd));
6781 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6783 + pud_populate(mm, pud, pmd);
6788 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6789 index ae85694..4cdbba8 100644
6790 --- a/arch/mips/include/asm/pgtable.h
6791 +++ b/arch/mips/include/asm/pgtable.h
6794 #include <asm/pgtable-bits.h>
6796 +#define ktla_ktva(addr) (addr)
6797 +#define ktva_ktla(addr) (addr)
6800 struct vm_area_struct;
6802 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6803 index 9c0014e..5101ef5 100644
6804 --- a/arch/mips/include/asm/thread_info.h
6805 +++ b/arch/mips/include/asm/thread_info.h
6806 @@ -100,6 +100,9 @@ static inline struct thread_info *current_thread_info(void)
6807 #define TIF_SECCOMP 4 /* secure computing */
6808 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6809 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6810 +/* li takes a 32bit immediate */
6811 +#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6813 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6814 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6815 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6816 @@ -135,14 +138,16 @@ static inline struct thread_info *current_thread_info(void)
6817 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6818 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6819 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6820 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6822 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6823 _TIF_SYSCALL_AUDIT | \
6824 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6825 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6826 + _TIF_GRSEC_SETXID)
6828 /* work to do in syscall_trace_leave() */
6829 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6830 - _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6831 + _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6833 /* work to do on interrupt/exception return */
6834 #define _TIF_WORK_MASK \
6835 @@ -150,7 +155,7 @@ static inline struct thread_info *current_thread_info(void)
6836 /* work to do on any return to u-space */
6837 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6838 _TIF_WORK_SYSCALL_EXIT | \
6839 - _TIF_SYSCALL_TRACEPOINT)
6840 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6843 * We stash processor id into a COP0 register to retrieve it fast
6844 diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6845 index 5305d69..1da2bf5 100644
6846 --- a/arch/mips/include/asm/uaccess.h
6847 +++ b/arch/mips/include/asm/uaccess.h
6848 @@ -146,6 +146,7 @@ static inline bool eva_kernel_access(void)
6852 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6853 #define access_ok(type, addr, size) \
6854 likely(__access_ok((addr), (size), __access_mask))
6856 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6857 index 1188e00..41cf144 100644
6858 --- a/arch/mips/kernel/binfmt_elfn32.c
6859 +++ b/arch/mips/kernel/binfmt_elfn32.c
6860 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6861 #undef ELF_ET_DYN_BASE
6862 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6864 +#ifdef CONFIG_PAX_ASLR
6865 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6867 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6868 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6871 #include <asm/processor.h>
6872 #include <linux/module.h>
6873 #include <linux/elfcore.h>
6874 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6875 index 9287678..f870e47 100644
6876 --- a/arch/mips/kernel/binfmt_elfo32.c
6877 +++ b/arch/mips/kernel/binfmt_elfo32.c
6878 @@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6879 #undef ELF_ET_DYN_BASE
6880 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6882 +#ifdef CONFIG_PAX_ASLR
6883 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6885 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6886 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6889 #include <asm/processor.h>
6891 #include <linux/module.h>
6892 diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6893 index 74f6752..f3d7a47 100644
6894 --- a/arch/mips/kernel/i8259.c
6895 +++ b/arch/mips/kernel/i8259.c
6896 @@ -205,7 +205,7 @@ spurious_8259A_irq:
6897 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6898 spurious_irq_mask |= irqmask;
6900 - atomic_inc(&irq_err_count);
6901 + atomic_inc_unchecked(&irq_err_count);
6903 * Theoretically we do not have to handle this IRQ,
6904 * but in Linux this does not cause problems and is
6905 diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6906 index 44a1f79..2bd6aa3 100644
6907 --- a/arch/mips/kernel/irq-gt641xx.c
6908 +++ b/arch/mips/kernel/irq-gt641xx.c
6909 @@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6913 - atomic_inc(&irq_err_count);
6914 + atomic_inc_unchecked(&irq_err_count);
6917 void __init gt641xx_irq_init(void)
6918 diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6919 index 8eb5af8..2baf465 100644
6920 --- a/arch/mips/kernel/irq.c
6921 +++ b/arch/mips/kernel/irq.c
6922 @@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq)
6923 printk("unexpected IRQ # %d\n", irq);
6926 -atomic_t irq_err_count;
6927 +atomic_unchecked_t irq_err_count;
6929 int arch_show_interrupts(struct seq_file *p, int prec)
6931 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6932 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6936 asmlinkage void spurious_interrupt(void)
6938 - atomic_inc(&irq_err_count);
6939 + atomic_inc_unchecked(&irq_err_count);
6942 void __init init_IRQ(void)
6943 @@ -58,6 +58,8 @@ void __init init_IRQ(void)
6946 #ifdef CONFIG_DEBUG_STACKOVERFLOW
6948 +extern void gr_handle_kernel_exploit(void);
6949 static inline void check_stack_overflow(void)
6952 @@ -73,6 +75,7 @@ static inline void check_stack_overflow(void)
6953 printk("do_IRQ: stack overflow: %ld\n",
6954 sp - sizeof(struct thread_info));
6956 + gr_handle_kernel_exploit();
6960 diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6961 index 0614717..002fa43 100644
6962 --- a/arch/mips/kernel/pm-cps.c
6963 +++ b/arch/mips/kernel/pm-cps.c
6964 @@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6965 nc_core_ready_count = nc_addr;
6967 /* Ensure ready_count is zero-initialised before the assembly runs */
6968 - ACCESS_ONCE(*nc_core_ready_count) = 0;
6969 + ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6970 coupled_barrier(&per_cpu(pm_barrier, core), online);
6972 /* Run the generated entry code */
6973 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6974 index f2975d4..f61d355 100644
6975 --- a/arch/mips/kernel/process.c
6976 +++ b/arch/mips/kernel/process.c
6977 @@ -541,18 +541,6 @@ out:
6982 - * Don't forget that the stack pointer must be aligned on a 8 bytes
6983 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6985 -unsigned long arch_align_stack(unsigned long sp)
6987 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6988 - sp -= get_random_int() & ~PAGE_MASK;
6990 - return sp & ALMASK;
6993 static void arch_dump_stack(void *info)
6995 struct pt_regs *regs;
6996 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6997 index e933a30..0d02625 100644
6998 --- a/arch/mips/kernel/ptrace.c
6999 +++ b/arch/mips/kernel/ptrace.c
7000 @@ -785,6 +785,10 @@ long arch_ptrace(struct task_struct *child, long request,
7004 +#ifdef CONFIG_GRKERNSEC_SETXID
7005 +extern void gr_delayed_cred_worker(void);
7009 * Notification of system call entry/exit
7010 * - triggered by current->work.syscall_trace
7011 @@ -803,6 +807,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7012 tracehook_report_syscall_entry(regs))
7015 +#ifdef CONFIG_GRKERNSEC_SETXID
7016 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7017 + gr_delayed_cred_worker();
7020 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7021 trace_sys_enter(regs, regs->regs[2]);
7023 diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7024 index 2242bdd..b284048 100644
7025 --- a/arch/mips/kernel/sync-r4k.c
7026 +++ b/arch/mips/kernel/sync-r4k.c
7028 #include <asm/mipsregs.h>
7030 static atomic_t count_start_flag = ATOMIC_INIT(0);
7031 -static atomic_t count_count_start = ATOMIC_INIT(0);
7032 -static atomic_t count_count_stop = ATOMIC_INIT(0);
7033 +static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7034 +static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7035 static atomic_t count_reference = ATOMIC_INIT(0);
7038 @@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7040 for (i = 0; i < NR_LOOPS; i++) {
7041 /* slaves loop on '!= 2' */
7042 - while (atomic_read(&count_count_start) != 1)
7043 + while (atomic_read_unchecked(&count_count_start) != 1)
7045 - atomic_set(&count_count_stop, 0);
7046 + atomic_set_unchecked(&count_count_stop, 0);
7049 /* this lets the slaves write their count register */
7050 - atomic_inc(&count_count_start);
7051 + atomic_inc_unchecked(&count_count_start);
7054 * Everyone initialises count in the last loop:
7055 @@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7057 * Wait for all slaves to leave the synchronization point:
7059 - while (atomic_read(&count_count_stop) != 1)
7060 + while (atomic_read_unchecked(&count_count_stop) != 1)
7062 - atomic_set(&count_count_start, 0);
7063 + atomic_set_unchecked(&count_count_start, 0);
7065 - atomic_inc(&count_count_stop);
7066 + atomic_inc_unchecked(&count_count_stop);
7068 /* Arrange for an interrupt in a short while */
7069 write_c0_compare(read_c0_count() + COUNTON);
7070 @@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7071 initcount = atomic_read(&count_reference);
7073 for (i = 0; i < NR_LOOPS; i++) {
7074 - atomic_inc(&count_count_start);
7075 - while (atomic_read(&count_count_start) != 2)
7076 + atomic_inc_unchecked(&count_count_start);
7077 + while (atomic_read_unchecked(&count_count_start) != 2)
7081 @@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7082 if (i == NR_LOOPS-1)
7083 write_c0_count(initcount);
7085 - atomic_inc(&count_count_stop);
7086 - while (atomic_read(&count_count_stop) != 2)
7087 + atomic_inc_unchecked(&count_count_stop);
7088 + while (atomic_read_unchecked(&count_count_stop) != 2)
7091 /* Arrange for an interrupt in a short while */
7092 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7093 index 8ea28e6..c8873d5 100644
7094 --- a/arch/mips/kernel/traps.c
7095 +++ b/arch/mips/kernel/traps.c
7096 @@ -697,7 +697,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7099 prev_state = exception_enter();
7100 - die_if_kernel("Integer overflow", regs);
7101 + if (unlikely(!user_mode(regs))) {
7103 +#ifdef CONFIG_PAX_REFCOUNT
7104 + if (fixup_exception(regs)) {
7105 + pax_report_refcount_overflow(regs);
7106 + exception_exit(prev_state);
7111 + die("Integer overflow", regs);
7114 info.si_code = FPE_INTOVF;
7115 info.si_signo = SIGFPE;
7116 diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
7117 index cd4c129..290c518 100644
7118 --- a/arch/mips/kvm/mips.c
7119 +++ b/arch/mips/kvm/mips.c
7120 @@ -1016,7 +1016,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7124 -int kvm_arch_init(void *opaque)
7125 +int kvm_arch_init(const void *opaque)
7127 if (kvm_mips_callbacks) {
7128 kvm_err("kvm: module already exists\n");
7129 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7130 index 852a41c..75b9d38 100644
7131 --- a/arch/mips/mm/fault.c
7132 +++ b/arch/mips/mm/fault.c
7135 int show_unhandled_signals = 1;
7137 +#ifdef CONFIG_PAX_PAGEEXEC
7138 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7142 + printk(KERN_ERR "PAX: bytes at PC: ");
7143 + for (i = 0; i < 5; i++) {
7145 + if (get_user(c, (unsigned int *)pc+i))
7146 + printk(KERN_CONT "???????? ");
7148 + printk(KERN_CONT "%08x ", c);
7155 * This routine handles page faults. It determines the address,
7156 * and the problem, and then passes it off to one of the appropriate
7157 @@ -207,6 +224,14 @@ bad_area:
7158 bad_area_nosemaphore:
7159 /* User mode accesses just cause a SIGSEGV */
7160 if (user_mode(regs)) {
7162 +#ifdef CONFIG_PAX_PAGEEXEC
7163 + if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7164 + pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7165 + do_group_exit(SIGKILL);
7169 tsk->thread.cp0_badvaddr = address;
7170 tsk->thread.error_code = write;
7171 if (show_unhandled_signals &&
7172 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7173 index 5c81fdd..db158d3 100644
7174 --- a/arch/mips/mm/mmap.c
7175 +++ b/arch/mips/mm/mmap.c
7176 @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7177 struct vm_area_struct *vma;
7178 unsigned long addr = addr0;
7180 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7181 struct vm_unmapped_area_info info;
7183 if (unlikely(len > TASK_SIZE))
7184 @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7187 /* requesting a specific address */
7189 +#ifdef CONFIG_PAX_RANDMMAP
7190 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7195 addr = COLOUR_ALIGN(addr, pgoff);
7196 @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7197 addr = PAGE_ALIGN(addr);
7199 vma = find_vma(mm, addr);
7200 - if (TASK_SIZE - len >= addr &&
7201 - (!vma || addr + len <= vma->vm_start))
7202 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7207 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7208 info.align_offset = pgoff << PAGE_SHIFT;
7209 + info.threadstack_offset = offset;
7212 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7213 @@ -160,45 +166,34 @@ void arch_pic