]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/commitdiff
Auto commit, 1 new patch.
authorPiotr Karbowski <piotr.karbowski@gmail.com>
Thu, 10 Nov 2016 05:49:58 +0000 (06:49 +0100)
committerPiotr Karbowski <piotr.karbowski@gmail.com>
Thu, 10 Nov 2016 05:49:58 +0000 (06:49 +0100)
test/changelog-test.txt
test/grsecurity-3.1-4.8.6-201611091800.patch [new file with mode: 0644]
test/grsecurity-3.1-4.8.6-201611091800.patch.sig [new file with mode: 0644]

index 08ce7e75143ccb20bce748d971e8197d1c820c1b..1c97bb7e3cb57a4decf54b3c9fb42f98f7b53b2e 100644 (file)
@@ -1,3 +1,56 @@
+commit c65bef9442a61a12256456658a6e3a3aa6f0017c
+Author: Brad Spengler <spender@grsecurity.net>
+Date:   Wed Nov 9 17:22:04 2016 -0500
+
+    Add SLAB_USERCOPY backward compatibility for out of tree modules
+
+ include/linux/slab.h | 7 +++++++
+ mm/slab_common.c     | 3 ++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+commit 9f7c67696110c732bc080f27629a93c652aa6784
+Merge: 8c2ed61 26e177d
+Author: Brad Spengler <spender@grsecurity.net>
+Date:   Wed Nov 9 17:13:02 2016 -0500
+
+    Merge branch 'pax-test' into grsec-test
+
+commit 26e177df8561bd7b261090dcce16f8bc5a166e43
+Author: Brad Spengler <spender@grsecurity.net>
+Date:   Wed Nov 9 17:11:43 2016 -0500
+
+    Update to pax-linux-4.8.6-test2.patch:
+    - fixed resume regression on X86 caused by the recent constification of boot_cpu_data, reported by Joe Gabinsky (https://bugs.archlinux.org/task/51767)
+    - worked around a compile error with gcc enforced PIE, reported by Carlos Carvalho (https://forums.grsecurity.net/viewtopic.php?f=3&t=4607)
+    - fixed a latent entropy compile error that triggered on arm, reported by spender
+
+ Makefile                                              |  2 ++
+ arch/x86/include/asm/irqflags.h                       |  2 ++
+ arch/x86/include/asm/uaccess_32.h                     |  3 +++
+ arch/x86/include/asm/uaccess_64.h                     |  4 ++++
+ arch/x86/kernel/acpi/wakeup_32.S                      |  2 ++
+ arch/x86/kernel/acpi/wakeup_64.S                      |  2 ++
+ arch/x86/kernel/cpu/common.c                          |  6 ++++++
+ arch/x86/kernel/vm86_32.c                             |  6 ++----
+ arch/x86/power/hibernate_asm_32.S                     |  2 ++
+ arch/x86/power/hibernate_asm_64.S                     |  2 ++
+ fs/pstore/ftrace.c                                    |  1 +
+ kernel/events/hw_breakpoint.c                         |  1 +
+ mm/slab.c                                             | 14 +++++++-------
+ scripts/gcc-plugins/latent_entropy_plugin.c           |  4 ++--
+ scripts/gcc-plugins/size_overflow_plugin/disable.data |  3 +++
+ scripts/gcc-plugins/size_overflow_plugin/e_fns.data   |  5 +----
+ 16 files changed, 42 insertions(+), 17 deletions(-)
+
+commit 8c2ed61b048133cef4d19cbcfad489c5229c6d85
+Author: Brad Spengler <spender@grsecurity.net>
+Date:   Wed Nov 9 17:08:54 2016 -0500
+
+    re-enable latent_entropy on ARM
+
+ security/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
 commit f111a022d48483a796d7f6d170e5165fa17c32aa
 Author: Brad Spengler <spender@grsecurity.net>
 Date:   Tue Nov 8 21:10:00 2016 -0500
diff --git a/test/grsecurity-3.1-4.8.6-201611091800.patch b/test/grsecurity-3.1-4.8.6-201611091800.patch
new file mode 100644 (file)
index 0000000..dbef24d
--- /dev/null
@@ -0,0 +1,159896 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index 5385cba..607c6a0 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -7,6 +7,7 @@
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -16,6 +17,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -52,14 +54,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -73,9 +78,11 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -84,6 +91,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -96,11 +104,14 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+@@ -109,19 +120,23 @@ config-*
+ config_data.h*
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+ crc32table.h*
+ cscope.*
+ defkeymap.c
++devicetable-offsets.h
+ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -129,12 +144,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -149,14 +167,14 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+ kxgettext
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -167,12 +185,14 @@ machtypes.h
+ map
+ map_hugetlb
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -188,6 +208,8 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
++pasyms.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -197,6 +219,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -206,7 +229,12 @@ r200_reg_safe.h
+ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
++randomize_layout_hash.h
++randomize_layout_seed.h
++realmode.lds
++realmode.relocs
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -216,8 +244,17 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++aux.h
++disable.h
++e_fields.h
++e_fns.h
++e_fptrs.h
++e_vars.h
+ sImage
++slabinfo
+ sm_tbl*
++sortextable
+ split-include
+ syscalltab.h
+ tables.c
+@@ -227,6 +264,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -238,13 +276,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -252,9 +294,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index 385a5ef..51d7fba 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
+       === 4 Host Program support
+          --- 4.1 Simple Host Program
+          --- 4.2 Composite Host Programs
+-         --- 4.3 Using C++ for host programs
+-         --- 4.4 Controlling compiler options for host programs
+-         --- 4.5 When host programs are actually built
+-         --- 4.6 Using hostprogs-$(CONFIG_FOO)
++         --- 4.3 Defining shared libraries
++         --- 4.4 Using C++ for host programs
++         --- 4.5 Controlling compiler options for host programs
++         --- 4.6 When host programs are actually built
++         --- 4.7 Using hostprogs-$(CONFIG_FOO)
+       === 5 Kbuild clean infrastructure
+@@ -644,7 +645,29 @@ Both possibilities are described in the following.
+       Finally, the two .o files are linked to the executable, lxdialog.
+       Note: The syntax <executable>-y is not permitted for host-programs.
+---- 4.3 Using C++ for host programs
++--- 4.3 Defining shared libraries
++
++      Objects with extension .so are considered shared libraries, and
++      will be compiled as position independent objects.
++      Kbuild provides support for shared libraries, but the usage
++      shall be restricted.
++      In the following example the libkconfig.so shared library is used
++      to link the executable conf.
++
++      Example:
++              #scripts/kconfig/Makefile
++              hostprogs-y     := conf
++              conf-objs       := conf.o libkconfig.so
++              libkconfig-objs := expr.o type.o
++
++      Shared libraries always require a corresponding -objs line, and
++      in the example above the shared library libkconfig is composed by
++      the two objects expr.o and type.o.
++      expr.o and type.o will be built as position independent code and
++      linked as a shared library libkconfig.so. C++ is not supported for
++      shared libraries.
++
++--- 4.4 Using C++ for host programs
+       kbuild offers support for host programs written in C++. This was
+       introduced solely to support kconfig, and is not recommended
+@@ -667,7 +690,7 @@ Both possibilities are described in the following.
+               qconf-cxxobjs := qconf.o
+               qconf-objs    := check.o
+---- 4.4 Controlling compiler options for host programs
++--- 4.5 Controlling compiler options for host programs
+       When compiling host programs, it is possible to set specific flags.
+       The programs will always be compiled utilising $(HOSTCC) passed
+@@ -695,7 +718,7 @@ Both possibilities are described in the following.
+       When linking qconf, it will be passed the extra option
+       "-L$(QTDIR)/lib".
+---- 4.5 When host programs are actually built
++--- 4.6 When host programs are actually built
+       Kbuild will only build host-programs when they are referenced
+       as a prerequisite.
+@@ -726,7 +749,7 @@ Both possibilities are described in the following.
+       This will tell kbuild to build lxdialog even if not referenced in
+       any rule.
+---- 4.6 Using hostprogs-$(CONFIG_FOO)
++--- 4.7 Using hostprogs-$(CONFIG_FOO)
+       A typical pattern in a Kbuild file looks like this:
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 46726d4..36138ff 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1368,6 +1368,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       [KNL] Should the hard-lockup detector generate
+                       backtraces on all cpus.
+                       Format: <integer>
++      grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++                      ignore grsecurity's /proc restrictions
++
++      grsec_sysfs_restrict= Format: 0 | 1
++                      Default: 1
++                      Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
+       hashdist=       [KNL,NUMA] Large hashes allocated during boot
+                       are distributed across NUMA nodes.  Defaults on
+@@ -2591,6 +2597,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       noexec=on: enable non-executable mappings (default)
+                       noexec=off: disable non-executable mappings
++      nopcid          [X86-64]
++                      Disable PCID (Process-Context IDentifier) even if it
++                      is supported by the processor.
++
+       nosmap          [X86]
+                       Disable SMAP (Supervisor Mode Access Prevention)
+                       even if it is supported by processor.
+@@ -2895,6 +2905,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       the specified number of seconds.  This is to be used if
+                       your oopses keep scrolling off the screen.
++      pax_nouderef    [X86] disables UDEREF.  Most likely needed under certain
++                      virtualization environments that don't cope well with the
++                      expand down segment used by UDEREF on X86-32 or the frequent
++                      page table updates on X86-64.
++
++      pax_sanitize_slab=
++                      Format: { 0 | 1 | off | fast | full }
++                      Options '0' and '1' are only provided for backward
++                      compatibility, 'off' or 'fast' should be used instead.
++                      0|off : disable slab object sanitization
++                      1|fast: enable slab object sanitization excluding
++                              whitelisted slabs (default)
++                      full  : sanitize all slabs, even the whitelisted ones
++
++      pax_softmode=   0/1 to disable/enable PaX softmode on boot already.
++
++      pax_extra_latent_entropy
++                      Enable a very simple form of latent entropy extraction
++                      from the first 4GB of memory as the bootmem allocator
++                      passes the memory pages to the buddy allocator.
++
++      pax_size_overflow_report_only
++                      Enables rate-limited logging of size_overflow plugin
++                      violations while disabling killing of the violating
++                      task.
++
++      pax_weakuderef  [X86-64] enables the weaker but faster form of UDEREF
++                      when the processor supports PCID.
++
+       pcbit=          [HW,ISDN]
+       pcd.            [PARIDE]
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index ffab8b5..b8fcd61 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -42,6 +42,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print       [ X86 only ]
+ - l2cr                        [ PPC only ]
++- modify_ldt                  [ X86 only ]
+ - modprobe                    ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msg_next_id               [ sysv ipc ]
+@@ -409,6 +410,20 @@ This flag controls the L2 cache of G3 processor boards. If
+ ==============================================================
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run a 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+ A toggle value indicating if modules are allowed to be loaded
+diff --git a/Makefile b/Makefile
+index b249529..d525945 100644
+--- a/Makefile
++++ b/Makefile
+@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ HOSTCC       = gcc
+ HOSTCXX      = g++
+ HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS   = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
++HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
+ ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+ HOSTCFLAGS  += -Wno-unused-value -Wno-unused-parameter \
+@@ -621,6 +623,8 @@ include arch/$(SRCARCH)/Makefile
+ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
+ KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
++KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS += -Os
+@@ -715,7 +719,7 @@ KBUILD_CFLAGS   += $(call cc-option, -gsplit-dwarf, -g)
+ else
+ KBUILD_CFLAGS += -g
+ endif
+-KBUILD_AFLAGS += -Wa,-gdwarf-2
++KBUILD_AFLAGS += -Wa,--gdwarf-2
+ endif
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
+@@ -890,7 +894,7 @@ export mod_sign_cmd
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y                += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y                += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ vmlinux-dirs  := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+                    $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -1256,7 +1260,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+                 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
+                 signing_key.pem signing_key.priv signing_key.x509     \
+                 x509.genkey extra_certificates signing_key.x509.keyid \
+-                signing_key.x509.signer vmlinux-gdb.py
++                signing_key.x509.signer vmlinux-gdb.py \
++                scripts/gcc-plugins/size_overflow_plugin/e_*.h \
++                scripts/gcc-plugins/size_overflow_plugin/disable.h \
++                scripts/gcc-plugins/randomize_layout_seed.h
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1295,7 +1302,7 @@ distclean: mrproper
+       @find $(srctree) $(RCS_FIND_IGNORE) \
+               \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+               -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+-              -o -name '.*.rej' -o -name '*%'  -o -name 'core' \) \
++              -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
+               -type f -print | xargs rm -f
+diff --git a/arch/Kconfig b/arch/Kconfig
+index fd6e971..35d7bbf 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -355,7 +355,7 @@ config HAVE_GCC_PLUGINS
+ menuconfig GCC_PLUGINS
+       bool "GCC plugins"
+       depends on HAVE_GCC_PLUGINS
+-      depends on !COMPILE_TEST
++      default y
+       help
+         GCC plugins are loadable modules that provide extra features to the
+         compiler. They are useful for runtime instrumentation and static analysis.
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 498933a..78d2b22 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -308,4 +308,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ALPHA_ATOMIC_H */
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
++#include <linux/const.h>
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES     64
+ # define L1_CACHE_SHIFT     6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+    direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES     32
+ # define L1_CACHE_SHIFT     5
+ #endif
++#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES    L1_CACHE_BYTES
+ #endif
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 968d999..d36b2df 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #define ELF_ET_DYN_BASE               (TASK_UNMAPPED_BASE + 0x1000000)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN    (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN   (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be 
+    registered using atexit.  This provides a mean for the dynamic
+    linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index c2ebb6f..93a0613 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+       pgd_set(pgd, pmd);
+ }
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++      pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index a9a1195..e9b8417 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED   __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY     __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC     __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC   PAGE_SHARED
++# define PAGE_COPY_NOEXEC     PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL   __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 936bc8f..bb1859f 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+       /* The small sections were sorted to the end of the segment.
+          The following should definitely cover them.  */
+-      gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
++      gp = (u64)me->core_layout.base_rw + me->core_layout.size_rw - 0x8000;
+       got = sechdrs[me->arch.gotsecindex].sh_addr;
+       for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index ffb93f49..ced8233 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+-                       unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++                       unsigned long limit, unsigned long flags)
+ {
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       info.flags = 0;
+       info.length = len;
+@@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+       info.high_limit = limit;
+       info.align_mask = 0;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+@@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+          merely specific addresses, but regions of memory -- perhaps
+          this feature should be incorporated into all ports?  */
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+-              addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++              addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+               if (addr != (unsigned long) -ENOMEM)
+                       return addr;
+       }
+       /* Next, try allocating at TASK_UNMAPPED_BASE.  */
+-      addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+-                                       len, limit);
++      addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+       if (addr != (unsigned long) -ENOMEM)
+               return addr;
+       /* Finally, try allocating in low memory.  */
+-      addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++      addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+       return addr;
+ }
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 83e9eee..db02682 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+       __reload_thread(pcb);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: patched PLT emulation #1 */
++              unsigned int ldah, ldq, jmp;
++
++              err = get_user(ldah, (unsigned int *)regs->pc);
++              err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++              err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++                  (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++                  jmp == 0x6BFB0000U)
++              {
++                      unsigned long r27, addr;
++                      unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++                      unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++                      addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++                      err = get_user(r27, (unsigned long *)addr);
++                      if (err)
++                              break;
++
++                      regs->r27 = r27;
++                      regs->pc = r27;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #2 */
++              unsigned int ldah, lda, br;
++
++              err = get_user(ldah, (unsigned int *)regs->pc);
++              err |= get_user(lda, (unsigned int *)(regs->pc+4));
++              err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++                  (lda & 0xFFFF0000U) == 0xA77B0000U &&
++                  (br & 0xFFE00000U) == 0xC3E00000U)
++              {
++                      unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++                      unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++                      unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++                      regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++                      regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation */
++              unsigned int br;
++
++              err = get_user(br, (unsigned int *)regs->pc);
++
++              if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++                      unsigned int br2, ldq, nop, jmp;
++                      unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++                      addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++                      err = get_user(br2, (unsigned int *)addr);
++                      err |= get_user(ldq, (unsigned int *)(addr+4));
++                      err |= get_user(nop, (unsigned int *)(addr+8));
++                      err |= get_user(jmp, (unsigned int *)(addr+12));
++                      err |= get_user(resolver, (unsigned long *)(addr+16));
++
++                      if (err)
++                              break;
++
++                      if (br2 == 0xC3600000U &&
++                          ldq == 0xA77B000CU &&
++                          nop == 0x47FF041FU &&
++                          jmp == 0x6B7B0000U)
++                      {
++                              regs->r28 = regs->pc+4;
++                              regs->r27 = addr+16;
++                              regs->pc = resolver;
++                              return 3;
++                      }
++              }
++      } while (0);
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
+ /*
+  * This routine handles page faults.  It determines the address,
+@@ -132,8 +250,29 @@ retry:
+  good_area:
+       si_code = SEGV_ACCERR;
+       if (cause < 0) {
+-              if (!(vma->vm_flags & VM_EXEC))
++              if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++                      if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++                              goto bad_area;
++
++                      up_read(&mm->mmap_sem);
++                      switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++                      case 2:
++                      case 3:
++                              return;
++#endif
++
++                      }
++                      pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++                      do_group_exit(SIGKILL);
++#else
+                       goto bad_area;
++#endif
++
++              }
+       } else if (!cause) {
+               /* Allow reads even for write-only mappings */
+               if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index 0d3e59f..4418d65 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -541,6 +541,7 @@ config ARC_DBG_TLB_MISS_COUNT
+       bool "Profile TLB Misses"
+       default n
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       help
+         Counts number of I and D TLB Misses and exports them via Debugfs
+         The counters can be cleared via Debugfs as well
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index a9c4e48..75bc9c9 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1621,6 +1621,7 @@ config AEABI
+ config OABI_COMPAT
+       bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
+       depends on AEABI && !THUMB2_KERNEL
++      depends on !GRKERNSEC
+       help
+         This option preserves the old syscall interface along with the
+         new (ARM EABI) one. It also provides a compatibility layer to
+@@ -1689,6 +1690,7 @@ config HIGHPTE
+ config CPU_SW_DOMAIN_PAN
+       bool "Enable use of CPU domains to implement privileged no-access"
+       depends on MMU && !ARM_LPAE
++      depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       default y
+       help
+         Increase kernel security by ensuring that normal kernel accesses
+@@ -1765,7 +1767,7 @@ config ALIGNMENT_TRAP
+ config UACCESS_WITH_MEMCPY
+       bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+-      depends on MMU
++      depends on MMU && !PAX_MEMORY_UDEREF
+       default y if CPU_FEROCEON
+       help
+         Implement faster copy_to_user and clear_user methods for CPU
+@@ -2020,6 +2022,7 @@ config KEXEC
+       depends on (!SMP || PM_SLEEP_SMP)
+       depends on !CPU_V7M
+       select KEXEC_CORE
++      depends on !GRKERNSEC_KMEM
+       help
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+@@ -2064,7 +2067,7 @@ config EFI_STUB
+ config EFI
+       bool "UEFI runtime support"
+-      depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
++      depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL && !PAX_KERNEXEC
+       select UCS2_STRING
+       select EFI_PARAMS_FROM_FDT
+       select EFI_STUB
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index a9693b6..87d8936 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -7,6 +7,7 @@ config ARM_PTDUMP
+       depends on DEBUG_KERNEL
+       depends on MMU
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       ---help---
+         Say Y here if you want to show the kernel pagetable layout in a
+         debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index d50430c..01cc53b 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -103,6 +103,8 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
+ endif
++KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
++
+ # -fstack-protector-strong triggers protection checks in this code,
+ # but it is being used too early to link to meaningful stack_chk logic.
+ nossp_flags := $(call cc-option, -fno-stack-protector)
+diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
+index 6fc73bf..d0af3c7b 100644
+--- a/arch/arm/crypto/sha1_glue.c
++++ b/arch/arm/crypto/sha1_glue.c
+@@ -27,8 +27,8 @@
+ #include "sha1.h"
+-asmlinkage void sha1_block_data_order(u32 *digest,
+-              const unsigned char *data, unsigned int rounds);
++asmlinkage void sha1_block_data_order(struct sha1_state *digest,
++              const u8 *data, int rounds);
+ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+                   unsigned int len)
+@@ -36,22 +36,20 @@ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+       /* make sure casting to sha1_block_fn() is safe */
+       BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
+-      return sha1_base_do_update(desc, data, len,
+-                                 (sha1_block_fn *)sha1_block_data_order);
++      return sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ }
+ EXPORT_SYMBOL_GPL(sha1_update_arm);
+ static int sha1_final(struct shash_desc *desc, u8 *out)
+ {
+-      sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order);
++      sha1_base_do_finalize(desc, sha1_block_data_order);
+       return sha1_base_finish(desc, out);
+ }
+ int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+                  unsigned int len, u8 *out)
+ {
+-      sha1_base_do_update(desc, data, len,
+-                          (sha1_block_fn *)sha1_block_data_order);
++      sha1_base_do_update(desc, data, len, sha1_block_data_order);
+       return sha1_final(desc, out);
+ }
+ EXPORT_SYMBOL_GPL(sha1_finup_arm);
+diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
+index 4e22f12..49902aa 100644
+--- a/arch/arm/crypto/sha1_neon_glue.c
++++ b/arch/arm/crypto/sha1_neon_glue.c
+@@ -31,8 +31,8 @@
+ #include "sha1.h"
+-asmlinkage void sha1_transform_neon(void *state_h, const char *data,
+-                                  unsigned int rounds);
++asmlinkage void sha1_transform_neon(struct sha1_state *state_h, const u8 *data,
++                                  int rounds);
+ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+                         unsigned int len)
+@@ -45,7 +45,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+       kernel_neon_begin();
+       sha1_base_do_update(desc, data, len,
+-                          (sha1_block_fn *)sha1_transform_neon);
++                          sha1_transform_neon);
+       kernel_neon_end();
+       return 0;
+@@ -60,8 +60,8 @@ static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
+       kernel_neon_begin();
+       if (len)
+               sha1_base_do_update(desc, data, len,
+-                                  (sha1_block_fn *)sha1_transform_neon);
+-      sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon);
++                                  sha1_transform_neon);
++      sha1_base_do_finalize(desc, sha1_transform_neon);
+       kernel_neon_end();
+       return sha1_base_finish(desc, out);
+diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
+index a84e869..53a0c61 100644
+--- a/arch/arm/crypto/sha256_glue.c
++++ b/arch/arm/crypto/sha256_glue.c
+@@ -30,8 +30,8 @@
+ #include "sha256_glue.h"
+-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+-                                      unsigned int num_blks);
++asmlinkage void sha256_block_data_order(struct sha256_state *digest, const u8 *data,
++                                      int num_blks);
+ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+@@ -39,23 +39,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+       /* make sure casting to sha256_block_fn() is safe */
+       BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+-      return sha256_base_do_update(desc, data, len,
+-                              (sha256_block_fn *)sha256_block_data_order);
++      return sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_update);
+ static int sha256_final(struct shash_desc *desc, u8 *out)
+ {
+-      sha256_base_do_finalize(desc,
+-                              (sha256_block_fn *)sha256_block_data_order);
++      sha256_base_do_finalize(desc, sha256_block_data_order);
+       return sha256_base_finish(desc, out);
+ }
+ int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+                           unsigned int len, u8 *out)
+ {
+-      sha256_base_do_update(desc, data, len,
+-                            (sha256_block_fn *)sha256_block_data_order);
++      sha256_base_do_update(desc, data, len, sha256_block_data_order);
+       return sha256_final(desc, out);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_finup);
+diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
+index 39ccd65..f9511cb 100644
+--- a/arch/arm/crypto/sha256_neon_glue.c
++++ b/arch/arm/crypto/sha256_neon_glue.c
+@@ -26,8 +26,8 @@
+ #include "sha256_glue.h"
+-asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
+-                                           unsigned int num_blks);
++asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, const u8 *data,
++                                           int num_blks);
+ static int sha256_update(struct shash_desc *desc, const u8 *data,
+                        unsigned int len)
+@@ -39,8 +39,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
+               return crypto_sha256_arm_update(desc, data, len);
+       kernel_neon_begin();
+-      sha256_base_do_update(desc, data, len,
+-                      (sha256_block_fn *)sha256_block_data_order_neon);
++      sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
+       kernel_neon_end();
+       return 0;
+@@ -54,10 +53,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
+       kernel_neon_begin();
+       if (len)
+-              sha256_base_do_update(desc, data, len,
+-                      (sha256_block_fn *)sha256_block_data_order_neon);
+-      sha256_base_do_finalize(desc,
+-                      (sha256_block_fn *)sha256_block_data_order_neon);
++              sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
++      sha256_base_do_finalize(desc, sha256_block_data_order_neon);
+       kernel_neon_end();
+       return sha256_base_finish(desc, out);
+diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
+index 269a394..c7a91f1 100644
+--- a/arch/arm/crypto/sha512-glue.c
++++ b/arch/arm/crypto/sha512-glue.c
+@@ -28,27 +28,24 @@ MODULE_ALIAS_CRYPTO("sha512");
+ MODULE_ALIAS_CRYPTO("sha384-arm");
+ MODULE_ALIAS_CRYPTO("sha512-arm");
+-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
++asmlinkage void sha512_block_data_order(struct sha512_state *state, u8 const *src, int blocks);
+ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+                     unsigned int len)
+ {
+-      return sha512_base_do_update(desc, data, len,
+-              (sha512_block_fn *)sha512_block_data_order);
++      return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ }
+ int sha512_arm_final(struct shash_desc *desc, u8 *out)
+ {
+-      sha512_base_do_finalize(desc,
+-              (sha512_block_fn *)sha512_block_data_order);
++      sha512_base_do_finalize(desc, sha512_block_data_order);
+       return sha512_base_finish(desc, out);
+ }
+ int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+                    unsigned int len, u8 *out)
+ {
+-      sha512_base_do_update(desc, data, len,
+-              (sha512_block_fn *)sha512_block_data_order);
++      sha512_base_do_update(desc, data, len, sha512_block_data_order);
+       return sha512_arm_final(desc, out);
+ }
+diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
+index 3269368..9fcbc00 100644
+--- a/arch/arm/crypto/sha512-neon-glue.c
++++ b/arch/arm/crypto/sha512-neon-glue.c
+@@ -22,7 +22,7 @@
+ MODULE_ALIAS_CRYPTO("sha384-neon");
+ MODULE_ALIAS_CRYPTO("sha512-neon");
+-asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src,
++asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, u8 const *src,
+                                            int blocks);
+ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+@@ -35,8 +35,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+               return sha512_arm_update(desc, data, len);
+       kernel_neon_begin();
+-      sha512_base_do_update(desc, data, len,
+-              (sha512_block_fn *)sha512_block_data_order_neon);
++      sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
+       kernel_neon_end();
+       return 0;
+@@ -50,10 +49,8 @@ static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
+       kernel_neon_begin();
+       if (len)
+-              sha512_base_do_update(desc, data, len,
+-                      (sha512_block_fn *)sha512_block_data_order_neon);
+-      sha512_base_do_finalize(desc,
+-              (sha512_block_fn *)sha512_block_data_order_neon);
++              sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
++      sha512_base_do_finalize(desc, sha512_block_data_order_neon);
+       kernel_neon_end();
+       return sha512_base_finish(desc, out);
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 66d0e21..8fa3237 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -18,17 +18,41 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i)        { (i) }
+ #ifdef __KERNEL__
++#ifdef CONFIG_THUMB2_KERNEL
++#define REFCOUNT_TRAP_INSN "bkpt      0xf1"
++#else
++#define REFCOUNT_TRAP_INSN "bkpt      0xf103"
++#endif
++
++#define _ASM_EXTABLE(from, to)                \
++"     .pushsection __ex_table,\"a\"\n"\
++"     .align  3\n"                    \
++"     .long   " #from ", " #to"\n"    \
++"     .popsection"
++
+ /*
+  * On ARM, ordinary assignment (str instruction) doesn't clear the local
+  * strex/ldrex monitor on some implementations. The reason we can use it for
+  * atomic_set() is the clrex or dummy strex done on every exception return.
+  */
+ #define atomic_read(v)        READ_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return READ_ONCE(v->counter);
++}
+ #define atomic_set(v,i)       WRITE_ONCE(((v)->counter), (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      WRITE_ONCE(v->counter, i);
++}
+ #if __LINUX_ARM_ARCH__ >= 6
+@@ -38,45 +62,74 @@
+  * to ensure that the update happens.
+  */
+-#define ATOMIC_OP(op, c_op, asm_op)                                   \
+-static inline void atomic_##op(int i, atomic_t *v)                    \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST                       \
++      "       bvc     3f\n"           \
++      "2:     " REFCOUNT_TRAP_INSN "\n"\
++      "3:\n"
++#define __OVERFLOW_POST_RETURN                \
++      "       bvc     3f\n"           \
++      "       mov     %1, %0\n"       \
++      "2:     " REFCOUNT_TRAP_INSN "\n"\
++      "3:\n"
++#define __OVERFLOW_EXTABLE            \
++      "4:\n"                          \
++      _ASM_EXTABLE(2b, 4b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_POST_RETURN
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, c_op, asm_op)                         \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)  \
+ {                                                                     \
+       unsigned long tmp;                                              \
+       int result;                                                     \
+                                                                       \
+       prefetchw(&v->counter);                                         \
+-      __asm__ __volatile__("@ atomic_" #op "\n"                       \
++      __asm__ __volatile__("@ atomic_" #op #suffix "\n"               \
+ "1:   ldrex   %0, [%3]\n"                                             \
+ "     " #asm_op "     %0, %0, %4\n"                                   \
++      __OVERFLOW_POST                                                 \
+ "     strex   %1, %0, [%3]\n"                                         \
+ "     teq     %1, #0\n"                                               \
+-"     bne     1b"                                                     \
++"     bne     1b\n"                                                   \
++      __OVERFLOW_EXTABLE                                              \
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
+       : "r" (&v->counter), "Ir" (i)                                   \
+       : "cc");                                                        \
+ }                                                                     \
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                            \
+-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)    \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op)\
++                                  __ATOMIC_OP(op, , c_op, asm_op##s)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op)                  \
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
+ {                                                                     \
+-      unsigned long tmp;                                              \
++      int tmp;                                                        \
+       int result;                                                     \
+                                                                       \
+       prefetchw(&v->counter);                                         \
+                                                                       \
+-      __asm__ __volatile__("@ atomic_" #op "_return\n"                \
++      __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n"     \
+ "1:   ldrex   %0, [%3]\n"                                             \
+-"     " #asm_op "     %0, %0, %4\n"                                   \
+-"     strex   %1, %0, [%3]\n"                                         \
+-"     teq     %1, #0\n"                                               \
+-"     bne     1b"                                                     \
+-      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
++"     " #asm_op "     %1, %0, %4\n"                                   \
++      __OVERFLOW_POST_RETURN                                          \
++"     strex   %0, %1, [%3]\n"                                         \
++"     teq     %0, #0\n"                                               \
++"     bne     1b\n"                                                   \
++      __OVERFLOW_EXTABLE                                              \
++      : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)               \
+       : "r" (&v->counter), "Ir" (i)                                   \
+       : "cc");                                                        \
+                                                                       \
+       return result;                                                  \
+ }
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)\
++                                         __ATOMIC_OP_RETURN(op, , c_op, asm_op##s)
++
+ #define ATOMIC_FETCH_OP(op, c_op, asm_op)                             \
+ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)     \
+ {                                                                     \
+@@ -99,6 +152,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)   \
+ }
+ #define atomic_add_return_relaxed     atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed   atomic_add_return_unchecked_relaxed
+ #define atomic_sub_return_relaxed     atomic_sub_return_relaxed
+ #define atomic_fetch_add_relaxed      atomic_fetch_add_relaxed
+ #define atomic_fetch_sub_relaxed      atomic_fetch_sub_relaxed
+@@ -141,12 +195,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+       __asm__ __volatile__ ("@ atomic_add_unless\n"
+ "1:   ldrex   %0, [%4]\n"
+ "     teq     %0, %5\n"
+-"     beq     2f\n"
+-"     add     %1, %0, %6\n"
++"     beq     4f\n"
++"     adds    %1, %0, %6\n"
++
++      __OVERFLOW_POST
++
+ "     strex   %2, %1, [%4]\n"
+ "     teq     %2, #0\n"
+ "     bne     1b\n"
+-"2:"
++
++      __OVERFLOW_EXTABLE
++
+       : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "r" (u), "r" (a)
+       : "cc");
+@@ -157,14 +216,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+       return oldval;
+ }
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++      unsigned long oldval, res;
++
++      smp_mb();
++
++      do {
++              __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++              "ldrex  %1, [%3]\n"
++              "mov    %0, #0\n"
++              "teq    %1, %4\n"
++              "strexeq %0, %5, [%3]\n"
++                  : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++                  : "r" (&ptr->counter), "Ir" (old), "r" (new)
++                  : "cc");
++      } while (res);
++
++      smp_mb();
++
++      return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+ #ifdef CONFIG_SMP
+ #error SMP not supported on pre-ARMv6 CPUs
+ #endif
+-#define ATOMIC_OP(op, c_op, asm_op)                                   \
+-static inline void atomic_##op(int i, atomic_t *v)                    \
++#define __ATOMIC_OP(op, suffix, c_op, asm_op)                         \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)  \
+ {                                                                     \
+       unsigned long flags;                                            \
+                                                                       \
+@@ -173,8 +254,11 @@ static inline void atomic_##op(int i, atomic_t *v)                        \
+       raw_local_irq_restore(flags);                                   \
+ }                                                                     \
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                            \
+-static inline int atomic_##op##_return(int i, atomic_t *v)            \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op)   \
++                                  __ATOMIC_OP(op, _unchecked, c_op, asm_op)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op)                  \
++static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
+ {                                                                     \
+       unsigned long flags;                                            \
+       int val;                                                        \
+@@ -201,6 +285,9 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                    \
+       return val;                                                     \
+ }
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
++                                         __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
++
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+       int ret;
+@@ -215,6 +302,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+       return ret;
+ }
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++      return atomic_cmpxchg((atomic_t *)v, old, new);
++}
++
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+       int c, old;
+@@ -250,16 +342,29 @@ ATOMIC_OPS(xor, ^=, eor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++#define atomic_xchg_unchecked(v, new) (xchg_unchecked(&((v)->counter), new))
+ #define atomic_inc(v)         atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v)         atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      atomic_sub_unchecked(1, v);
++}
+ #define atomic_inc_and_test(v)        (atomic_add_return(1, v) == 0)
++#define atomic_inc_and_test_unchecked(v)      (atomic_add_return_unchecked(1, v) == 0)
+ #define atomic_dec_and_test(v)        (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
++#define atomic_inc_return_unchecked_relaxed(v)    (atomic_add_return_unchecked_relaxed(1, v))
+ #define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+@@ -270,6 +375,14 @@ typedef struct {
+       long long counter;
+ } atomic64_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      long long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+ #ifdef CONFIG_ARM_LPAE
+@@ -286,6 +399,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+       return result;
+ }
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      long long result;
++
++      __asm__ __volatile__("@ atomic64_read_unchecked\n"
++"     ldrd    %0, %H0, [%1]"
++      : "=&r" (result)
++      : "r" (&v->counter), "Qo" (v->counter)
++      );
++
++      return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+       __asm__ __volatile__("@ atomic64_set\n"
+@@ -294,6 +420,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+       : "r" (&v->counter), "r" (i)
+       );
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++      __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"     strd    %2, %H2, [%1]"
++      : "=Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      );
++}
+ #else
+ static inline long long atomic64_read(const atomic64_t *v)
+ {
+@@ -308,6 +443,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+       return result;
+ }
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      long long result;
++
++      __asm__ __volatile__("@ atomic64_read_unchecked\n"
++"     ldrexd  %0, %H0, [%1]"
++      : "=&r" (result)
++      : "r" (&v->counter), "Qo" (v->counter)
++      );
++
++      return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+       long long tmp;
+@@ -322,50 +470,82 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++      long long tmp;
++
++      prefetchw(&v->counter);
++      __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1:   ldrexd  %0, %H0, [%2]\n"
++"     strexd  %0, %3, %H3, [%2]\n"
++"     teq     %0, #0\n"
++"     bne     1b"
++      : "=&r" (tmp), "=Qo" (v->counter)
++      : "r" (&v->counter), "r" (i)
++      : "cc");
++}
+ #endif
+-#define ATOMIC64_OP(op, op1, op2)                                     \
+-static inline void atomic64_##op(long long i, atomic64_t *v)          \
++#define __OVERFLOW_POST_RETURN64      \
++      "       bvc     3f\n"           \
++"     mov     %Q1, %Q0\n"             \
++"     mov     %R1, %R0\n"             \
++      "2:     " REFCOUNT_TRAP_INSN "\n"\
++      "3:\n"
++
++#define __ATOMIC64_OP(op, suffix, op1, op2)                           \
++static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
+ {                                                                     \
+       long long result;                                               \
+       unsigned long tmp;                                              \
+                                                                       \
+       prefetchw(&v->counter);                                         \
+-      __asm__ __volatile__("@ atomic64_" #op "\n"                     \
++      __asm__ __volatile__("@ atomic64_" #op #suffix "\n"             \
+ "1:   ldrexd  %0, %H0, [%3]\n"                                        \
+ "     " #op1 " %Q0, %Q0, %Q4\n"                                       \
+ "     " #op2 " %R0, %R0, %R4\n"                                       \
++      __OVERFLOW_POST                                                 \
+ "     strexd  %1, %0, %H0, [%3]\n"                                    \
+ "     teq     %1, #0\n"                                               \
+-"     bne     1b"                                                     \
++"     bne     1b\n"                                                   \
++      __OVERFLOW_EXTABLE                                              \
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
+       : "r" (&v->counter), "r" (i)                                    \
+       : "cc");                                                        \
+ }                                                                     \
+-#define ATOMIC64_OP_RETURN(op, op1, op2)                              \
++#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2) \
++                                __ATOMIC64_OP(op, , op1, op2##s)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2)                    \
+ static inline long long                                                       \
+-atomic64_##op##_return_relaxed(long long i, atomic64_t *v)            \
++atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
+ {                                                                     \
+       long long result;                                               \
+-      unsigned long tmp;                                              \
++      long long tmp;                                                  \
+                                                                       \
+       prefetchw(&v->counter);                                         \
+                                                                       \
+-      __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
++      __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n"   \
+ "1:   ldrexd  %0, %H0, [%3]\n"                                        \
+-"     " #op1 " %Q0, %Q0, %Q4\n"                                       \
+-"     " #op2 " %R0, %R0, %R4\n"                                       \
+-"     strexd  %1, %0, %H0, [%3]\n"                                    \
+-"     teq     %1, #0\n"                                               \
+-"     bne     1b"                                                     \
+-      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
++"     " #op1 " %Q1, %Q0, %Q4\n"                                       \
++"     " #op2 " %R1, %R0, %R4\n"                                       \
++      __OVERFLOW_POST_RETURN64                                        \
++"     strexd  %0, %1, %H1, [%3]\n"                                    \
++"     teq     %0, #0\n"                                               \
++"     bne     1b\n"                                                   \
++      __OVERFLOW_EXTABLE                                              \
++      : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)               \
+       : "r" (&v->counter), "r" (i)                                    \
+       : "cc");                                                        \
+                                                                       \
+       return result;                                                  \
+ }
++#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2) \
++                                       __ATOMIC64_OP_RETURN(op, , op1, op2##s)
++
+ #define ATOMIC64_FETCH_OP(op, op1, op2)                                       \
+ static inline long long                                                       \
+ atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)             \
+@@ -398,6 +578,7 @@ ATOMIC64_OPS(add, adds, adc)
+ ATOMIC64_OPS(sub, subs, sbc)
+ #define atomic64_add_return_relaxed   atomic64_add_return_relaxed
++#define atomic64_add_return_unchecked_relaxed atomic64_add_return_unchecked_relaxed
+ #define atomic64_sub_return_relaxed   atomic64_sub_return_relaxed
+ #define atomic64_fetch_add_relaxed    atomic64_fetch_add_relaxed
+ #define atomic64_fetch_sub_relaxed    atomic64_fetch_sub_relaxed
+@@ -422,7 +603,10 @@ ATOMIC64_OPS(xor, eor, eor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_POST_RETURN
+ static inline long long
+ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -448,6 +632,13 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+ }
+ #define atomic64_cmpxchg_relaxed      atomic64_cmpxchg_relaxed
++static inline long long
++atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old, long long new)
++{
++      return atomic64_cmpxchg_relaxed((atomic64_t *)ptr, old, new);
++}
++#define atomic64_cmpxchg_unchecked_relaxed    atomic64_cmpxchg_unchecked_relaxed
++
+ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+ {
+       long long result;
+@@ -468,25 +659,36 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+ }
+ #define atomic64_xchg_relaxed         atomic64_xchg_relaxed
++static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
++{
++      return atomic64_xchg_relaxed((atomic64_t *)ptr, new);
++}
++#define atomic64_xchg_unchecked_relaxed               atomic64_xchg_unchecked_relaxed
++
+ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+       long long result;
+-      unsigned long tmp;
++      u64 tmp;
+       smp_mb();
+       prefetchw(&v->counter);
+       __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+ "1:   ldrexd  %0, %H0, [%3]\n"
+-"     subs    %Q0, %Q0, #1\n"
+-"     sbc     %R0, %R0, #0\n"
+-"     teq     %R0, #0\n"
+-"     bmi     2f\n"
+-"     strexd  %1, %0, %H0, [%3]\n"
+-"     teq     %1, #0\n"
++"     subs    %Q1, %Q0, #1\n"
++"     sbcs    %R1, %R0, #0\n"
++
++      __OVERFLOW_POST_RETURN64
++
++"     teq     %R1, #0\n"
++"     bmi     4f\n"
++"     strexd  %0, %1, %H1, [%3]\n"
++"     teq     %0, #0\n"
+ "     bne     1b\n"
+-"2:"
+-      : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++
++      __OVERFLOW_EXTABLE
++
++      : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)
+       : "r" (&v->counter)
+       : "cc");
+@@ -509,13 +711,18 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ "     teq     %0, %5\n"
+ "     teqeq   %H0, %H5\n"
+ "     moveq   %1, #0\n"
+-"     beq     2f\n"
++"     beq     4f\n"
+ "     adds    %Q0, %Q0, %Q6\n"
+-"     adc     %R0, %R0, %R6\n"
++"     adcs    %R0, %R0, %R6\n"
++
++      __OVERFLOW_POST
++
+ "     strexd  %2, %0, %H0, [%4]\n"
+ "     teq     %2, #0\n"
+ "     bne     1b\n"
+-"2:"
++
++      __OVERFLOW_EXTABLE
++
+       : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "r" (u), "r" (a)
+       : "cc");
+@@ -526,12 +733,19 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+       return ret;
+ }
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST_RETURN64
++#undef __OVERFLOW_POST
++
+ #define atomic64_add_negative(a, v)   (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v)                       atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v)     atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return_relaxed(v)        atomic64_add_return_relaxed(1LL, (v))
++#define atomic64_inc_return_unchecked_relaxed(v)      atomic64_add_return_unchecked_relaxed(1LL, (v))
+ #define atomic64_inc_and_test(v)      (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v)   (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v)                       atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v)     atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return_relaxed(v)        atomic64_sub_return_relaxed(1LL, (v))
+ #define atomic64_dec_and_test(v)      (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1LL, 0LL)
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..2255c86 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT                CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 9156fc3..0521e3e 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -116,7 +116,7 @@ struct cpu_cache_fns {
+       void (*dma_unmap_area)(const void *, size_t, int);
+       void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const __no_randomize_layout;
+ /*
+  * Select the calling method
+diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
+index 524692f..a8871ec 100644
+--- a/arch/arm/include/asm/checksum.h
++++ b/arch/arm/include/asm/checksum.h
+@@ -37,7 +37,19 @@ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+ __wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++
++static inline __wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
++{
++      __wsum ret;
++      pax_open_userland();
++      ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
++      pax_close_userland();
++      return ret;
++}
++
++
+ /*
+  *    Fold a partial checksum without adding pseudo headers
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index 97882f9..ff9d6ac 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+       (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),           \
+                                  sizeof(*(ptr)));                     \
+ })
++#define xchg_unchecked_relaxed(ptr, x) ({                             \
++      (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),           \
++                                 sizeof(*(ptr)));                     \
++})
+ #include <asm-generic/cmpxchg-local.h>
+@@ -128,6 +132,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ #endif
+ #define xchg xchg_relaxed
++#define xchg_unchecked xchg_unchecked_relaxed
+ /*
+  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
+index baefe1d..29cb35a 100644
+--- a/arch/arm/include/asm/cpuidle.h
++++ b/arch/arm/include/asm/cpuidle.h
+@@ -32,7 +32,7 @@ struct device_node;
+ struct cpuidle_ops {
+       int (*suspend)(unsigned long arg);
+       int (*init)(struct device_node *, int cpu);
+-};
++} __no_const;
+ struct of_cpuidle_method {
+       const char *method;
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index 99d9f63..ec44cb5 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -42,7 +42,6 @@
+ #define DOMAIN_USER   1
+ #define DOMAIN_IO     0
+ #endif
+-#define DOMAIN_VECTORS        3
+ /*
+  * Domain types
+@@ -51,9 +50,28 @@
+ #define DOMAIN_CLIENT 1
+ #ifdef CONFIG_CPU_USE_DOMAINS
+ #define DOMAIN_MANAGER        3
++#define DOMAIN_VECTORS        3
++#define DOMAIN_USERCLIENT     DOMAIN_CLIENT
+ #else
++
++#ifdef CONFIG_PAX_KERNEXEC
+ #define DOMAIN_MANAGER        1
++#define DOMAIN_KERNEXEC       3
++#else
++#define DOMAIN_MANAGER        1
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define DOMAIN_USERCLIENT     0
++#define DOMAIN_UDEREF         1
++#define DOMAIN_VECTORS                DOMAIN_KERNEL
++#else
++#define DOMAIN_USERCLIENT     1
++#define DOMAIN_VECTORS                DOMAIN_USER
++#endif
++
+ #endif
++#define DOMAIN_KERNELCLIENT   1
+ #define domain_mask(dom)      ((3) << (2 * (dom)))
+ #define domain_val(dom,type)  ((type) << (2 * (dom)))
+@@ -62,13 +80,19 @@
+ #define DACR_INIT \
+       (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+-       domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++       domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++      /* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
++#define DACR_INIT \
++      (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++       domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
++       domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
+ #else
+ #define DACR_INIT \
+-      (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
++      (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
+        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+-       domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++       domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+ #endif
+@@ -124,6 +148,17 @@ static inline void set_domain(unsigned val)
+               set_domain(domain);                             \
+       } while (0)
++#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#define modify_domain(dom,type)                                       \
++      do {                                                    \
++              struct thread_info *thread = current_thread_info(); \
++              unsigned int domain = get_domain();             \
++              domain &= ~domain_mask(dom);                    \
++              domain = domain | domain_val(dom, type);        \
++              thread->cpu_domain = domain;                    \
++              set_domain(domain);                             \
++      } while (0)
++
+ #else
+ static inline void modify_domain(unsigned dom, unsigned type) { }
+ #endif
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index d2315ff..f60b47b 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+-#define ELF_ET_DYN_BASE       (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN    ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN   ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+ /* When the program starts, a1 contains a pointer to a function to be 
+    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
+diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
+index de53547..52b9a28 100644
+--- a/arch/arm/include/asm/fncpy.h
++++ b/arch/arm/include/asm/fncpy.h
+@@ -81,7 +81,9 @@
+       BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||             \
+               (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+                                                                       \
++      pax_open_kernel();                                              \
+       memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);   \
++      pax_close_kernel();                                             \
+       flush_icache_range((unsigned long)(dest_buf),                   \
+               (unsigned long)(dest_buf) + (size));                    \
+                                                                       \
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 6795368..6c4d749 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+               return -EFAULT;
+       preempt_disable();
++
+       __ua_flags = uaccess_save_and_enable();
+       __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+       "1:     " TUSER(ldr) "  %1, [%4]\n"
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index 83eb2f7..ed77159 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -4,6 +4,6 @@
+ /*
+  * This is the "bare minimum".  AIO seems to require this.
+  */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+ #endif
+diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
+index 9e614a1..3302cca 100644
+--- a/arch/arm/include/asm/mach/dma.h
++++ b/arch/arm/include/asm/mach/dma.h
+@@ -22,7 +22,7 @@ struct dma_ops {
+       int     (*residue)(unsigned int, dma_t *);              /* optional */
+       int     (*setspeed)(unsigned int, dma_t *, int);        /* optional */
+       const char *type;
+-};
++} __do_const;
+ struct dma_struct {
+       void            *addr;          /* single DMA address           */
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9b7c328..2dfe68b 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -23,17 +23,19 @@ struct map_desc {
+ /* types 0-3 are defined in asm/io.h */
+ enum {
+-      MT_UNCACHED = 4,
+-      MT_CACHECLEAN,
+-      MT_MINICLEAN,
++      MT_UNCACHED_RW = 4,
++      MT_CACHECLEAN_RO,
++      MT_MINICLEAN_RO,
+       MT_LOW_VECTORS,
+       MT_HIGH_VECTORS,
+-      MT_MEMORY_RWX,
++      __MT_MEMORY_RWX,
+       MT_MEMORY_RW,
+-      MT_ROM,
+-      MT_MEMORY_RWX_NONCACHED,
++      MT_MEMORY_RX,
++      MT_ROM_RX,
++      MT_MEMORY_RW_NONCACHED,
++      MT_MEMORY_RX_NONCACHED,
+       MT_MEMORY_RW_DTCM,
+-      MT_MEMORY_RWX_ITCM,
++      MT_MEMORY_RX_ITCM,
+       MT_MEMORY_RW_SO,
+       MT_MEMORY_DMA_READY,
+ };
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index c2bf24f..69e437c 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -39,7 +39,7 @@ struct outer_cache_fns {
+       /* This is an ARM L2C thing */
+       void (*write_sec)(unsigned long, unsigned);
+       void (*configure)(const struct l2x0_regs *);
+-};
++} __no_const;
+ extern struct outer_cache_fns outer_cache;
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index 4355f0e..cd9168e 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -23,6 +23,7 @@
+ #else
++#include <linux/compiler.h>
+ #include <asm/glue.h>
+ /*
+@@ -114,7 +115,7 @@ struct cpu_user_fns {
+       void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+       void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+                       unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index b2902a5..da11e4d 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -17,6 +17,7 @@
+ #include <asm/processor.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/system_info.h>
+ #define check_pgt_cache()             do { } while (0)
+@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+       set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+ /*
+@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #define pmd_alloc_one(mm,addr)                ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd)             do { } while (0)
+ #define pud_populate(mm,pmd,pte)      BUG()
++#define pud_populate_kernel(mm,pmd,pte)       BUG()
+ #endif        /* CONFIG_ARM_LPAE */
+@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+       __free_page(pte);
+ }
++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
++{
++#ifdef CONFIG_ARM_LPAE
++      pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#else
++      if (addr & SECTION_SIZE)
++              pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
++      else
++              pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#endif
++      flush_pmd_entry(pmdp);
++}
++
+ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+                                 pmdval_t prot)
+ {
+diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
+index 3f82e9d..2a85e8b 100644
+--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
+@@ -28,7 +28,7 @@
+ /*
+  *   - section
+  */
+-#define PMD_SECT_PXN    (_AT(pmdval_t, 1) << 0)     /* v7 */
++#define PMD_SECT_PXN          (_AT(pmdval_t, 1) << 0)     /* v7 */
+ #define PMD_SECT_BUFFERABLE   (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE    (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_XN           (_AT(pmdval_t, 1) << 4)         /* v6 */
+@@ -40,6 +40,7 @@
+ #define PMD_SECT_nG           (_AT(pmdval_t, 1) << 17)        /* v6 */
+ #define PMD_SECT_SUPER                (_AT(pmdval_t, 1) << 18)        /* v6 */
+ #define PMD_SECT_AF           (_AT(pmdval_t, 0))
++#define PMD_SECT_RDONLY               (_AT(pmdval_t, 0))
+ #define PMD_SECT_UNCACHED     (_AT(pmdval_t, 0))
+ #define PMD_SECT_BUFFERED     (PMD_SECT_BUFFERABLE)
+@@ -70,6 +71,7 @@
+  *   - extended small page/tiny page
+  */
+ #define PTE_EXT_XN            (_AT(pteval_t, 1) << 0)         /* v6 */
++#define PTE_EXT_PXN           (_AT(pteval_t, 1) << 2)         /* v7 */
+ #define PTE_EXT_AP_MASK               (_AT(pteval_t, 3) << 4)
+ #define PTE_EXT_AP0           (_AT(pteval_t, 1) << 4)
+ #define PTE_EXT_AP1           (_AT(pteval_t, 2) << 4)
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 92fd2c8..061dae1 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -127,6 +127,9 @@
+ #define L_PTE_SHARED          (_AT(pteval_t, 1) << 10)        /* shared(v6), coherent(xsc3) */
+ #define L_PTE_NONE            (_AT(pteval_t, 1) << 11)
++/* Two-level page tables only have PXN in the PGD, not in the PTE. */
++#define L_PTE_PXN             (_AT(pteval_t, 0))
++
+ /*
+  * These are the memory types, defined to be compatible with
+  * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 2a029bc..a0524c7 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -80,6 +80,7 @@
+ #define L_PTE_USER            (_AT(pteval_t, 1) << 6)         /* AP[1] */
+ #define L_PTE_SHARED          (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG           (_AT(pteval_t, 1) << 10)        /* AF */
++#define L_PTE_PXN             (_AT(pteval_t, 1) << 53)        /* PXN */
+ #define L_PTE_XN              (_AT(pteval_t, 1) << 54)        /* XN */
+ #define L_PTE_DIRTY           (_AT(pteval_t, 1) << 55)
+ #define L_PTE_SPECIAL         (_AT(pteval_t, 1) << 56)
+@@ -90,10 +91,12 @@
+ #define L_PMD_SECT_DIRTY      (_AT(pmdval_t, 1) << 55)
+ #define L_PMD_SECT_NONE               (_AT(pmdval_t, 1) << 57)
+ #define L_PMD_SECT_RDONLY     (_AT(pteval_t, 1) << 58)
++#define PMD_SECT_RDONLY               PMD_SECT_AP2
+ /*
+  * To be used in assembly code with the upper page attributes.
+  */
++#define L_PTE_PXN_HIGH                (1 << (53 - 32))
+ #define L_PTE_XN_HIGH         (1 << (54 - 32))
+ #define L_PTE_DIRTY_HIGH      (1 << (55 - 32))
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index a8d656d..2febb8a 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -33,6 +33,9 @@
+ #include <asm/pgtable-2level.h>
+ #endif
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ /*
+  * Just any arbitrary offset to the start of the vmalloc VM area: the
+  * current 8MB value just means that there will be a 8MB "hole" after the
+@@ -48,6 +51,9 @@
+ #define LIBRARY_TEXT_START    0x0c000000
+ #ifndef __ASSEMBLY__
++extern pteval_t __supported_pte_mask;
++extern pmdval_t __supported_pmd_mask;
++
+ extern void __pte_error(const char *file, int line, pte_t);
+ extern void __pmd_error(const char *file, int line, pmd_t);
+ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ #define pmd_ERROR(pmd)                __pmd_error(__FILE__, __LINE__, pmd)
+ #define pgd_ERROR(pgd)                __pgd_error(__FILE__, __LINE__, pgd)
++#define  __HAVE_ARCH_PAX_OPEN_KERNEL
++#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/domain.h>
++#include <linux/thread_info.h>
++#include <linux/preempt.h>
++
++static inline int test_domain(int domain, int domaintype)
++{
++      return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
++}
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++      /* TODO */
++#else
++      preempt_disable();
++      BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
++      modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
++#endif
++      return 0;
++}
++
++static inline unsigned long pax_close_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++      /* TODO */
++#else
++      BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
++      /* DOMAIN_MANAGER = "client" under KERNEXEC */
++      modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
++      preempt_enable_no_resched();
++#endif
++      return 0;
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+  * This is the lowest virtual address we can permit any user space
+  * mapping to be mapped at.  This is particularly important for
+@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ /*
+  * The pgprot_* and protection_map entries will be fixed up in runtime
+  * to include the cachable and bufferable bits based on memory policy,
+- * as well as any architecture dependent bits like global/ASID and SMP
+- * shared mapping bits.
++ * as well as any architecture dependent bits like global/ASID, PXN,
++ * and SMP shared mapping bits.
+  */
+ #define _L_PTE_DEFAULT        L_PTE_PRESENT | L_PTE_YOUNG
+@@ -308,7 +356,7 @@ static inline pte_t pte_mknexec(pte_t pte)
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+       const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+-              L_PTE_NONE | L_PTE_VALID;
++              L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
+       pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+       return pte;
+ }
+diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
+index 3d6dc8b..1262ad3 100644
+--- a/arch/arm/include/asm/smp.h
++++ b/arch/arm/include/asm/smp.h
+@@ -108,7 +108,7 @@ struct smp_operations {
+       int  (*cpu_disable)(unsigned int cpu);
+ #endif
+ #endif
+-};
++} __no_const;
+ struct of_cpu_method {
+       const char *method;
+diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
+index cf4f3aa..8f2f2d9 100644
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -7,19 +7,19 @@
+  */
+ #define __HAVE_ARCH_STRRCHR
+-extern char * strrchr(const char * s, int c);
++extern char * strrchr(const char * s, int c) __nocapture(-1);
+ #define __HAVE_ARCH_STRCHR
+-extern char * strchr(const char * s, int c);
++extern char * strchr(const char * s, int c) __nocapture(-1);
+ #define __HAVE_ARCH_MEMCPY
+-extern void * memcpy(void *, const void *, __kernel_size_t);
++extern void * memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
+ #define __HAVE_ARCH_MEMMOVE
+-extern void * memmove(void *, const void *, __kernel_size_t);
++extern void * memmove(void *, const void *, __kernel_size_t) __nocapture(2);
+ #define __HAVE_ARCH_MEMCHR
+-extern void * memchr(const void *, int, __kernel_size_t);
++extern void * memchr(const void *, int, __kernel_size_t) __nocapture(-1);
+ #define __HAVE_ARCH_MEMSET
+ extern void * memset(void *, int, __kernel_size_t);
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d..a552c1d 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -73,6 +73,9 @@ struct thread_info {
+       .flags          = 0,                                            \
+       .preempt_count  = INIT_PREEMPT_COUNT,                           \
+       .addr_limit     = KERNEL_DS,                                    \
++      .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |  \
++                        domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
++                        domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),   \
+ }
+ #define init_thread_info      (init_thread_union.thread_info)
+@@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT     5       /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT        6       /* syscall tracepoint instrumentation */
+ #define TIF_SECCOMP           7       /* seccomp syscall filtering active */
++/* within 8 bits of TIF_SYSCALL_TRACE
++ *  to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID      8
+ #define TIF_NOHZ              12      /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT      17
+@@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SYSCALL_TRACEPOINT       (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_SECCOMP          (1 << TIF_SECCOMP)
+ #define _TIF_USING_IWMMXT     (1 << TIF_USING_IWMMXT)
++#define _TIF_GRSEC_SETXID     (1 << TIF_GRSEC_SETXID)
+ /* Checks for any syscall work in entry-common.S */
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+-                         _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++                         _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
+ /*
+  * Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index f6fcc67..5895d62 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -13,6 +13,7 @@
+ #define _ASMARM_TIMEX_H
+ typedef unsigned long cycles_t;
++extern int read_current_timer(unsigned long *timer_val);
+ #define get_cycles()  ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+ #endif
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 5f833f7..76e6644 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -3,6 +3,7 @@
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
+                        * at 0xffff0fe0 must be used instead.  (see
+                        * entry-armv.S for details)
+                        */
++                      pax_open_kernel();
+                       *((unsigned int *)0xffff0ff0) = val;
++                      pax_close_kernel();
+ #endif
+               }
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index a93c0f9..5c31bbb 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -18,6 +18,7 @@
+ #include <asm/domain.h>
+ #include <asm/unified.h>
+ #include <asm/compiler.h>
++#include <asm/pgtable.h>
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -50,6 +51,59 @@ struct exception_table_entry
+ extern int fixup_exception(struct pt_regs *regs);
+ /*
++ * These two are intentionally not defined anywhere - if the kernel
++ * code generates any references to them, that's a bug.
++ */
++extern int __get_user_bad(void);
++extern int __put_user_bad(void);
++
++/*
++ * Note that this is actually 0x1,0000,0000
++ */
++#define KERNEL_DS     0x00000000
++#define get_ds()      (KERNEL_DS)
++
++#ifdef CONFIG_MMU
++
++#define USER_DS               TASK_SIZE
++#define get_fs()      (current_thread_info()->addr_limit)
++
++static inline void set_fs(mm_segment_t fs)
++{
++      current_thread_info()->addr_limit = fs;
++      modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
++}
++
++#define segment_eq(a, b)      ((a) == (b))
++
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++static inline void pax_open_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (segment_eq(get_fs(), USER_DS)) {
++              BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
++              modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
++      }
++#endif
++
++}
++
++static inline void pax_close_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (segment_eq(get_fs(), USER_DS)) {
++              BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
++              modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
++      }
++#endif
++
++}
++
++/*
+  * These two functions allow hooking accesses to userspace to increase
+  * system integrity by ensuring that the kernel can not inadvertantly
+  * perform such accesses (eg, via list poison values) which could then
+@@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
+       return old_domain;
+ #else
++      pax_open_userland();
+       return 0;
+ #endif
+ }
+@@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /* Restore the user access mask */
+       set_domain(flags);
++#else
++      pax_close_userland();
+ #endif
+ }
+-/*
+- * These two are intentionally not defined anywhere - if the kernel
+- * code generates any references to them, that's a bug.
+- */
+-extern int __get_user_bad(void);
+-extern int __put_user_bad(void);
+-
+-/*
+- * Note that this is actually 0x1,0000,0000
+- */
+-#define KERNEL_DS     0x00000000
+-#define get_ds()      (KERNEL_DS)
+-
+-#ifdef CONFIG_MMU
+-
+-#define USER_DS               TASK_SIZE
+-#define get_fs()      (current_thread_info()->addr_limit)
+-
+-static inline void set_fs(mm_segment_t fs)
+-{
+-      current_thread_info()->addr_limit = fs;
+-      modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+-}
+-
+-#define segment_eq(a, b)      ((a) == (b))
+-
+ /* We use 33-bit arithmetic here... */
+ #define __range_ok(addr, size) ({ \
+       unsigned long flag, roksum; \
+@@ -268,6 +299,7 @@ static inline void set_fs(mm_segment_t fs)
+ #endif /* CONFIG_MMU */
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size)   (__range_ok(addr, size) == 0)
+ #define user_addr_max() \
+@@ -474,10 +506,10 @@ do {                                                                     \
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+-static inline unsigned long __must_check
++static inline unsigned long __must_check __size_overflow(3)
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+       unsigned int __ua_flags;
+@@ -489,9 +521,9 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
+       return n;
+ }
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+ static inline unsigned long __must_check
+@@ -511,9 +543,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ #endif
+ }
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ arm_clear_user(void __user *addr, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ __clear_user_std(void __user *addr, unsigned long n);
+ static inline unsigned long __must_check
+@@ -533,6 +565,9 @@ __clear_user(void __user *addr, unsigned long n)
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (access_ok(VERIFY_READ, from, n))
+               n = __copy_from_user(to, from, n);
+       else /* security hole - plug it */
+@@ -542,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (access_ok(VERIFY_WRITE, to, n))
+               n = __copy_to_user(to, from, n);
+       return n;
+diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
+index 5af0ed1..cea83883 100644
+--- a/arch/arm/include/uapi/asm/ptrace.h
++++ b/arch/arm/include/uapi/asm/ptrace.h
+@@ -92,7 +92,7 @@
+  * ARMv7 groups of PSR bits
+  */
+ #define APSR_MASK     0xf80f0000      /* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK 0x01000010      /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020      /* ISA state (J, T) mask */
+ #define PSR_IT_MASK   0x0600fc00      /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK       0x00000200      /* Endianness state mask */
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 7e45f69..2c047db 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -59,7 +59,7 @@ EXPORT_SYMBOL(arm_delay_ops);
+       /* networking */
+ EXPORT_SYMBOL(csum_partial);
+-EXPORT_SYMBOL(csum_partial_copy_from_user);
++EXPORT_SYMBOL(__csum_partial_copy_from_user);
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
+ EXPORT_SYMBOL(__csum_ipv6_magic);
+diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
+index 7dccc96..84da243 100644
+--- a/arch/arm/kernel/cpuidle.c
++++ b/arch/arm/kernel/cpuidle.c
+@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
+ static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+       __used __section(__cpuidle_method_of_table_end);
+-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
++static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
+ /**
+  * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
+index 9f43ba0..1cee475 100644
+--- a/arch/arm/kernel/efi.c
++++ b/arch/arm/kernel/efi.c
+@@ -60,9 +60,9 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+        * preference.
+        */
+       if (md->attribute & EFI_MEMORY_WB)
+-              desc.type = MT_MEMORY_RWX;
++              desc.type = __MT_MEMORY_RWX;
+       else if (md->attribute & EFI_MEMORY_WT)
+-              desc.type = MT_MEMORY_RWX_NONCACHED;
++              desc.type = MT_MEMORY_RW_NONCACHED;
+       else if (md->attribute & EFI_MEMORY_WC)
+               desc.type = MT_DEVICE_WC;
+       else
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 9f157e7..8e3f857 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -50,6 +50,87 @@
+ 9997:
+       .endm
++      .macro  pax_enter_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ make aligned space for saved DACR
++      sub     sp, sp, #8
++      @ save regs
++      stmdb   sp!, {r1, r2}
++      @ read DACR from cpu_domain into r1
++      mov     r2, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r2, r2, #(0x1fc0)
++      bic     r2, r2, #(0x3f)
++      ldr     r1, [r2, #TI_CPU_DOMAIN]
++      @ store old DACR on stack
++      str     r1, [sp, #8]
++#ifdef CONFIG_PAX_KERNEXEC
++      @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ set current DOMAIN_USER to DOMAIN_NOACCESS
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r2, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r1, r2}
++#endif
++      .endm
++
++      .macro  pax_open_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read DACR from cpu_domain into r1
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      ldr     r1, [r0, #TI_CPU_DOMAIN]
++      @ set current DOMAIN_USER to DOMAIN_CLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
++      .macro  pax_close_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read DACR from cpu_domain into r1
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      ldr     r1, [r0, #TI_CPU_DOMAIN]
++      @ set current DOMAIN_USER to DOMAIN_NOACCESS
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
+       .macro  pabt_helper
+       @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+ #ifdef MULTI_PABORT
+@@ -92,11 +173,15 @@
+  * Invalid mode handlers
+  */
+       .macro  inv_entry, reason
++
++      pax_enter_kernel
++
+       sub     sp, sp, #PT_REGS_SIZE
+  ARM( stmib   sp, {r1 - lr}           )
+  THUMB(       stmia   sp, {r0 - r12}          )
+  THUMB(       str     sp, [sp, #S_SP]         )
+  THUMB(       str     lr, [sp, #S_LR]         )
++
+       mov     r1, #\reason
+       .endm
+@@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
+       .macro  svc_entry, stack_hole=0, trace=1, uaccess=1
+  UNWIND(.fnstart              )
+  UNWIND(.save {r0 - pc}               )
++
++      pax_enter_kernel
++
+       sub     sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ #ifdef CONFIG_THUMB2_KERNEL
+  SPFIX(       str     r0, [sp]        )       @ temporarily saved
+@@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
+       ldmia   r0, {r3 - r5}
+       add     r7, sp, #S_SP - 4       @ here for interlock avoidance
+       mov     r6, #-1                 @  ""  ""      ""       ""
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ offset sp by 8 as done in pax_enter_kernel
++      add     r2, sp, #(SVC_REGS_SIZE + \stack_hole + 4)
++#else
+       add     r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
++#endif
+  SPFIX(       addeq   r2, r2, #4      )
+       str     r3, [sp, #-4]!          @ save the "real" r0 copied
+                                       @ from the exception stack
+@@ -382,6 +475,9 @@ ENDPROC(__fiq_abt)
+       .macro  usr_entry, trace=1, uaccess=1
+  UNWIND(.fnstart      )
+  UNWIND(.cantunwind   )       @ don't unwind the user space
++
++      pax_enter_kernel_user
++
+       sub     sp, sp, #PT_REGS_SIZE
+  ARM( stmib   sp, {r1 - r12}  )
+  THUMB(       stmia   sp, {r0 - r12}  )
+@@ -495,7 +591,9 @@ __und_usr:
+       tst     r3, #PSR_T_BIT                  @ Thumb mode?
+       bne     __und_usr_thumb
+       sub     r4, r2, #4                      @ ARM instr at LR - 4
++      pax_open_userland
+ 1:    ldrt    r0, [r4]
++      pax_close_userland
+  ARM_BE8(rev  r0, r0)                         @ little endian instruction
+       uaccess_disable ip
+@@ -531,11 +629,15 @@ __und_usr_thumb:
+  */
+       .arch   armv6t2
+ #endif
++      pax_open_userland
+ 2:    ldrht   r5, [r4]
++      pax_close_userland
+ ARM_BE8(rev16 r5, r5)                         @ little endian instruction
+       cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
+       blo     __und_usr_fault_16_pan          @ 16bit undefined instruction
++      pax_open_userland
+ 3:    ldrht   r0, [r2]
++      pax_close_userland
+ ARM_BE8(rev16 r0, r0)                         @ little endian instruction
+       uaccess_disable ip
+       add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
+@@ -566,7 +668,8 @@ ENDPROC(__und_usr)
+  */
+       .pushsection .text.fixup, "ax"
+       .align  2
+-4:    str     r4, [sp, #S_PC]                 @ retry current instruction
++4:    pax_close_userland
++      str     r4, [sp, #S_PC]                 @ retry current instruction
+       ret     r9
+       .popsection
+       .pushsection __ex_table,"a"
+@@ -788,7 +891,7 @@ ENTRY(__switch_to)
+  THUMB(       str     lr, [ip], #4               )
+       ldr     r4, [r2, #TI_TP_VALUE]
+       ldr     r5, [r2, #TI_TP_VALUE + 4]
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
+       str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
+       ldr     r6, [r2, #TI_CPU_DOMAIN]
+@@ -799,7 +902,7 @@ ENTRY(__switch_to)
+       ldr     r8, =__stack_chk_guard
+       ldr     r7, [r7, #TSK_STACK_CANARY]
+ #endif
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
+ #endif
+       mov     r5, r0
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 10c3283..c47cdf5 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -11,18 +11,46 @@
+ #include <asm/assembler.h>
+ #include <asm/unistd.h>
+ #include <asm/ftrace.h>
++#include <asm/domain.h>
+ #include <asm/unwind.h>
++#include "entry-header.S"
++
+ #ifdef CONFIG_NEED_RET_TO_USER
+ #include <mach/entry-macro.S>
+ #else
+       .macro  arch_ret_to_user, tmp1, tmp2
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ save regs
++      stmdb   sp!, {r1, r2}
++      @ read DACR from cpu_domain into r1
++      mov     r2, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r2, r2, #(0x1fc0)
++      bic     r2, r2, #(0x3f)
++      ldr     r1, [r2, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_KERNEXEC
++      @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ set current DOMAIN_USER to DOMAIN_UDEREF
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++#endif
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r2, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r1, r2}
++#endif
+       .endm
+ #endif
+-#include "entry-header.S"
+-
+-
+       .align  5
+ #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+ /*
+@@ -36,7 +64,9 @@ ret_fast_syscall:
+  UNWIND(.cantunwind   )
+       disable_irq_notrace                     @ disable interrupts
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+-      tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++      tst     r1, #_TIF_SYSCALL_WORK
++      bne     fast_work_pending
++      tst     r1, #_TIF_WORK_MASK
+       bne     fast_work_pending
+       /* perform architecture specific actions before user return */
+@@ -62,7 +92,9 @@ ret_fast_syscall:
+       str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
+       disable_irq_notrace                     @ disable interrupts
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+-      tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++      tst     r1, #_TIF_SYSCALL_WORK
++      bne     __sys_trace_return_nosave
++      tst     r1, #_TIF_WORK_MASK
+       beq     no_work_pending
+  UNWIND(.fnend                )
+ ENDPROC(ret_fast_syscall)
+@@ -199,6 +231,12 @@ ENTRY(vector_swi)
+       uaccess_disable tbl
++      /*
++       * do this here to avoid a performance hit of wrapping the code above
++       * that directly dereferences userland to parse the SWI instruction
++       */
++      pax_enter_kernel_user
++
+       adr     tbl, sys_call_table             @ load syscall table pointer
+ #if defined(CONFIG_OABI_COMPAT)
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 6391728..6bf90b8 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -196,6 +196,59 @@
+       msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
+       .endm
++      .macro  pax_enter_kernel_user
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read DACR from cpu_domain into r1
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      ldr     r1, [r0, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      @ set current DOMAIN_USER to DOMAIN_NOACCESS
++      bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++      @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++      bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++      orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++      @ write r1 to current_thread_info()->cpu_domain
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
++
++      .macro  pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      @ save regs
++      stmdb   sp!, {r0, r1}
++      @ read old DACR from stack into r1
++      ldr     r1, [sp, #(8 + S_SP)]
++      sub     r1, r1, #8
++      ldr     r1, [r1]
++
++      @ write r1 to current_thread_info()->cpu_domain
++      mov     r0, sp
++      @ assume 8K pages, since we have to split the immediate in two
++      bic     r0, r0, #(0x1fc0)
++      bic     r0, r0, #(0x3f)
++      str     r1, [r0, #TI_CPU_DOMAIN]
++      @ write r1 to DACR
++      mcr     p15, 0, r1, c3, c0, 0
++      @ instruction sync
++      instr_sync
++      @ restore regs
++      ldmia   sp!, {r0, r1}
++#endif
++      .endm
+       .macro  svc_exit, rpsr, irq = 0
+       .if     \irq != 0
+@@ -219,6 +272,8 @@
+       uaccess_restore
+       str     r1, [tsk, #TI_ADDR_LIMIT]
++      pax_exit_kernel
++
+ #ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode SVC restore
+       msr     spsr_cxsf, \rpsr
+diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
+index 059c3da..8e45cfc 100644
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
+       void *base = vectors_page;
+       unsigned offset = FIQ_OFFSET;
++      pax_open_kernel();
+       memcpy(base + offset, start, length);
++      pax_close_kernel();
++
+       if (!cache_is_vipt_nonaliasing())
+               flush_icache_range((unsigned long)base + offset, offset +
+                                  length);
+diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
+index 0c7efc3..3927085 100644
+--- a/arch/arm/kernel/module-plts.c
++++ b/arch/arm/kernel/module-plts.c
+@@ -30,17 +30,12 @@ struct plt_entries {
+       u32     lit[PLT_ENT_COUNT];
+ };
+-static bool in_init(const struct module *mod, u32 addr)
+-{
+-      return addr - (u32)mod->init_layout.base < mod->init_layout.size;
+-}
+-
+ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+ {
+       struct plt_entries *plt, *plt_end;
+       int c, *count;
+-      if (in_init(mod, loc)) {
++      if (within_module_init(loc, mod)) {
+               plt = (void *)mod->arch.init_plt->sh_addr;
+               plt_end = (void *)plt + mod->arch.init_plt->sh_size;
+               count = &mod->arch.init_plt_count;
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index 4f14b5c..91ff261 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -38,17 +38,47 @@
+ #endif
+ #ifdef CONFIG_MMU
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+-      void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+-                              GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++      void *p;
++
++      if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
++              return NULL;
++
++      p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
++                              GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+                               __builtin_return_address(0));
+       if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
+               return p;
+       return __vmalloc_node_range(size, 1,  VMALLOC_START, VMALLOC_END,
+-                              GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++                              GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+                               __builtin_return_address(0));
+ }
++
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      return __module_alloc(size, PAGE_KERNEL);
++#else
++      return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++void module_memfree_exec(void *module_region)
++{
++      module_memfree(module_region);
++}
++EXPORT_SYMBOL(module_memfree_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++      return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
+ #endif
+ int
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 69bda1a..755113a 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+       else
+               __acquire(&patch_lock);
++      pax_open_kernel();
+       if (thumb2 && __opcode_is_thumb16(insn)) {
+               *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
+               size = sizeof(u16);
+@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+               *(u32 *)waddr = insn;
+               size = sizeof(u32);
+       }
++      pax_close_kernel();
+       if (waddr != addr) {
+               flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 612eb53..5a44c8c 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -118,8 +118,8 @@ void __show_regs(struct pt_regs *regs)
+       show_regs_print_info(KERN_DEFAULT);
+-      print_symbol("PC is at %s\n", instruction_pointer(regs));
+-      print_symbol("LR is at %s\n", regs->ARM_lr);
++      printk("PC is at %pA\n", (void *)instruction_pointer(regs));
++      printk("LR is at %pA\n", (void *)regs->ARM_lr);
+       printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
+              "sp : %08lx  ip : %08lx  fp : %08lx\n",
+               regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -233,7 +233,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
+       memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       /*
+        * Copy the initial value of the domain access control register
+        * from the current thread: thread->addr_limit will have been
+@@ -337,7 +337,7 @@ static struct vm_area_struct gate_vma = {
+ static int __init gate_vma_init(void)
+ {
+-      gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
++      gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+       return 0;
+ }
+ arch_initcall(gate_vma_init);
+@@ -366,92 +366,14 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+       return is_gate_vma(vma) ? "[vectors]" : NULL;
+ }
+-/* If possible, provide a placement hint at a random offset from the
+- * stack for the sigpage and vdso pages.
+- */
+-static unsigned long sigpage_addr(const struct mm_struct *mm,
+-                                unsigned int npages)
+-{
+-      unsigned long offset;
+-      unsigned long first;
+-      unsigned long last;
+-      unsigned long addr;
+-      unsigned int slots;
+-
+-      first = PAGE_ALIGN(mm->start_stack);
+-
+-      last = TASK_SIZE - (npages << PAGE_SHIFT);
+-
+-      /* No room after stack? */
+-      if (first > last)
+-              return 0;
+-
+-      /* Just enough room? */
+-      if (first == last)
+-              return first;
+-
+-      slots = ((last - first) >> PAGE_SHIFT) + 1;
+-
+-      offset = get_random_int() % slots;
+-
+-      addr = first + (offset << PAGE_SHIFT);
+-
+-      return addr;
+-}
+-
+-static struct page *signal_page;
+-extern struct page *get_signal_page(void);
+-
+-static const struct vm_special_mapping sigpage_mapping = {
+-      .name = "[sigpage]",
+-      .pages = &signal_page,
+-};
+-
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+       struct mm_struct *mm = current->mm;
+-      struct vm_area_struct *vma;
+-      unsigned long npages;
+-      unsigned long addr;
+-      unsigned long hint;
+-      int ret = 0;
+-
+-      if (!signal_page)
+-              signal_page = get_signal_page();
+-      if (!signal_page)
+-              return -ENOMEM;
+-
+-      npages = 1; /* for sigpage */
+-      npages += vdso_total_pages;
+       if (down_write_killable(&mm->mmap_sem))
+               return -EINTR;
+-      hint = sigpage_addr(mm, npages);
+-      addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
+-      if (IS_ERR_VALUE(addr)) {
+-              ret = addr;
+-              goto up_fail;
+-      }
+-
+-      vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+-              VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+-              &sigpage_mapping);
+-
+-      if (IS_ERR(vma)) {
+-              ret = PTR_ERR(vma);
+-              goto up_fail;
+-      }
+-
+-      mm->context.sigpage = addr;
+-
+-      /* Unlike the sigpage, failure to install the vdso is unlikely
+-       * to be fatal to the process, so no error check needed
+-       * here.
+-       */
+-      arm_install_vdso(mm, addr + PAGE_SIZE);
+-
+- up_fail:
++      mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
+       up_write(&mm->mmap_sem);
+-      return ret;
++      return 0;
+ }
+ #endif
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ce131ed..26f9765 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
+       regs->ARM_ip = ip;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+ {
+       current_thread_info()->syscall = scno;
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
+index 3fa867a..d610607 100644
+--- a/arch/arm/kernel/reboot.c
++++ b/arch/arm/kernel/reboot.c
+@@ -120,6 +120,7 @@ void machine_power_off(void)
+       if (pm_power_off)
+               pm_power_off();
++      while (1);
+ }
+ /*
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index df7f2a7..d9d2bc1 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -112,21 +112,23 @@ EXPORT_SYMBOL(elf_hwcap);
+ unsigned int elf_hwcap2 __read_mostly;
+ EXPORT_SYMBOL(elf_hwcap2);
++pteval_t __supported_pte_mask __read_only;
++pmdval_t __supported_pmd_mask __read_only;
+ #ifdef MULTI_CPU
+-struct processor processor __read_mostly;
++struct processor processor __read_only;
+ #endif
+ #ifdef MULTI_TLB
+-struct cpu_tlb_fns cpu_tlb __read_mostly;
++struct cpu_tlb_fns cpu_tlb __read_only;
+ #endif
+ #ifdef MULTI_USER
+-struct cpu_user_fns cpu_user __read_mostly;
++struct cpu_user_fns cpu_user __read_only;
+ #endif
+ #ifdef MULTI_CACHE
+-struct cpu_cache_fns cpu_cache __read_mostly;
++struct cpu_cache_fns cpu_cache __read_only;
+ #endif
+ #ifdef CONFIG_OUTER_CACHE
+-struct outer_cache_fns outer_cache __read_mostly;
++struct outer_cache_fns outer_cache __read_only;
+ EXPORT_SYMBOL(outer_cache);
+ #endif
+@@ -257,9 +259,13 @@ static int __get_cpu_architecture(void)
+                * Register 0 and check for VMSAv7 or PMSAv7 */
+               unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
+               if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+-                  (mmfr0 & 0x000000f0) >= 0x00000030)
++                  (mmfr0 & 0x000000f0) >= 0x00000030) {
+                       cpu_arch = CPU_ARCH_ARMv7;
+-              else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
++                      if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
++                              __supported_pte_mask |= L_PTE_PXN;
++                              __supported_pmd_mask |= PMD_PXNTABLE;
++                      }
++              } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+                        (mmfr0 & 0x000000f0) == 0x00000020)
+                       cpu_arch = CPU_ARCH_ARMv6;
+               else
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 7b8f214..ece8e28 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -24,8 +24,6 @@
+ extern const unsigned long sigreturn_codes[7];
+-static unsigned long signal_return_offset;
+-
+ #ifdef CONFIG_CRUNCH
+ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+ {
+@@ -388,8 +386,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+                        * except when the MPU has protected the vectors
+                        * page from PL0
+                        */
+-                      retcode = mm->context.sigpage + signal_return_offset +
+-                                (idx << 2) + thumb;
++                      retcode = mm->context.sigpage + (idx << 2) + thumb;
+               } else
+ #endif
+               {
+@@ -601,33 +598,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+       } while (thread_flags & _TIF_WORK_MASK);
+       return 0;
+ }
+-
+-struct page *get_signal_page(void)
+-{
+-      unsigned long ptr;
+-      unsigned offset;
+-      struct page *page;
+-      void *addr;
+-
+-      page = alloc_pages(GFP_KERNEL, 0);
+-
+-      if (!page)
+-              return NULL;
+-
+-      addr = page_address(page);
+-
+-      /* Give the signal return code some randomness */
+-      offset = 0x200 + (get_random_int() & 0x7fc);
+-      signal_return_offset = offset;
+-
+-      /*
+-       * Copy signal return handlers into the vector page, and
+-       * set sigreturn to be a pointer to these.
+-       */
+-      memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+-
+-      ptr = (unsigned long)addr + offset;
+-      flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+-
+-      return page;
+-}
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 8615216..f5be307 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -82,7 +82,7 @@ enum ipi_msg_type {
+ static DECLARE_COMPLETION(cpu_running);
+-static struct smp_operations smp_ops;
++static struct smp_operations smp_ops __read_only;
+ void __init smp_set_ops(const struct smp_operations *ops)
+ {
+diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
+index b10e136..cb5edf9 100644
+--- a/arch/arm/kernel/tcm.c
++++ b/arch/arm/kernel/tcm.c
+@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
+               .virtual        = ITCM_OFFSET,
+               .pfn            = __phys_to_pfn(ITCM_OFFSET),
+               .length         = 0,
+-              .type           = MT_MEMORY_RWX_ITCM,
++              .type           = MT_MEMORY_RX_ITCM,
+       }
+ };
+@@ -362,7 +362,9 @@ no_dtcm:
+               start = &__sitcm_text;
+               end   = &__eitcm_text;
+               ram   = &__itcm_start;
++              pax_open_kernel();
+               memcpy(start, ram, itcm_code_sz);
++              pax_close_kernel();
+               pr_debug("CPU ITCM: copied code from %p - %p\n",
+                        start, end);
+               itcm_present = true;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index bc69838..e5dfdd4 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+-      printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
++      printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+       printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ static int die_owner = -1;
+ static unsigned int die_nest_count;
++extern void gr_handle_kernel_exploit(void);
++
+ static unsigned long oops_begin(void)
+ {
+       int cpu;
+@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
++
++      gr_handle_kernel_exploit();
++
+       if (signr)
+               do_exit(signr);
+ }
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index d24e5dd..77cf6cf 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -44,7 +44,8 @@
+ #endif
+ #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+-      defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
++      defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL) || \
++      defined(CONFIG_PAX_REFCOUNT)
+ #define ARM_EXIT_KEEP(x)      x
+ #define ARM_EXIT_DISCARD(x)
+ #else
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index c94b90d..0cc6830 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -59,7 +59,7 @@ static unsigned long hyp_default_vectors;
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+ /* The VMID used in the VTTBR */
+-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
++static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u32 kvm_next_vmid;
+ static unsigned int kvm_vmid_bits __read_mostly;
+ static DEFINE_SPINLOCK(kvm_vmid_lock);
+@@ -388,7 +388,7 @@ void force_vm_exit(const cpumask_t *mask)
+  */
+ static bool need_new_vmid_gen(struct kvm *kvm)
+ {
+-      return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
++      return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
+ }
+ /**
+@@ -421,7 +421,7 @@ static void update_vttbr(struct kvm *kvm)
+       /* First user of a new VMID generation? */
+       if (unlikely(kvm_next_vmid == 0)) {
+-              atomic64_inc(&kvm_vmid_gen);
++              atomic64_inc_unchecked(&kvm_vmid_gen);
+               kvm_next_vmid = 1;
+               /*
+@@ -438,7 +438,7 @@ static void update_vttbr(struct kvm *kvm)
+               kvm_call_hyp(__kvm_flush_vm_context);
+       }
+-      kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
++      kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
+       kvm->arch.vmid = kvm_next_vmid;
+       kvm_next_vmid++;
+       kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+  *  ASM optimised string functions
+  */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
+index 1712f13..a3165dc 100644
+--- a/arch/arm/lib/csumpartialcopyuser.S
++++ b/arch/arm/lib/csumpartialcopyuser.S
+@@ -71,8 +71,8 @@
+  *  Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+  */
+-#define FN_ENTRY      ENTRY(csum_partial_copy_from_user)
+-#define FN_EXIT               ENDPROC(csum_partial_copy_from_user)
++#define FN_ENTRY      ENTRY(__csum_partial_copy_from_user)
++#define FN_EXIT               ENDPROC(__csum_partial_copy_from_user)
+ #include "csumpartialcopygeneric.S"
+diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
+index 8044591..c9b2609 100644
+--- a/arch/arm/lib/delay.c
++++ b/arch/arm/lib/delay.c
+@@ -29,7 +29,7 @@
+ /*
+  * Default to the loop-based delay implementation.
+  */
+-struct arm_delay_ops arm_delay_ops = {
++struct arm_delay_ops arm_delay_ops __read_only = {
+       .delay          = __loop_delay,
+       .const_udelay   = __loop_const_udelay,
+       .udelay         = __loop_udelay,
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 6bd1089..e999400 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -84,7 +84,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+       return 1;
+ }
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(3)
+ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ {
+       unsigned long ua_flags;
+@@ -157,7 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
+       return n;
+ }
+       
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(2)
+ __clear_user_memset(void __user *addr, unsigned long n)
+ {
+       unsigned long ua_flags;
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index 06332f6..1fa0c71 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -724,8 +724,10 @@ void __init exynos_pm_init(void)
+       tmp |= pm_data->wake_disable_mask;
+       pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
+-      exynos_pm_syscore_ops.suspend   = pm_data->pm_suspend;
+-      exynos_pm_syscore_ops.resume    = pm_data->pm_resume;
++      pax_open_kernel();
++      const_cast(exynos_pm_syscore_ops.suspend)       = pm_data->pm_suspend;
++      const_cast(exynos_pm_syscore_ops.resume)        = pm_data->pm_resume;
++      pax_close_kernel();
+       register_syscore_ops(&exynos_pm_syscore_ops);
+       suspend_set_ops(&exynos_suspend_ops);
+diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
+index afba546..9e5403d 100644
+--- a/arch/arm/mach-mmp/mmp2.c
++++ b/arch/arm/mach-mmp/mmp2.c
+@@ -98,7 +98,9 @@ void __init mmp2_init_irq(void)
+ {
+       mmp2_init_icu();
+ #ifdef CONFIG_PM
+-      icu_irq_chip.irq_set_wake = mmp2_set_wake;
++      pax_open_kernel();
++      const_cast(icu_irq_chip.irq_set_wake) = mmp2_set_wake;
++      pax_close_kernel();
+ #endif
+ }
+diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
+index 1ccbba9..7a95c29 100644
+--- a/arch/arm/mach-mmp/pxa910.c
++++ b/arch/arm/mach-mmp/pxa910.c
+@@ -84,7 +84,9 @@ void __init pxa910_init_irq(void)
+ {
+       icu_init_irq();
+ #ifdef CONFIG_PM
+-      icu_irq_chip.irq_set_wake = pxa910_set_wake;
++      pax_open_kernel();
++      const_cast(icu_irq_chip.irq_set_wake) = pxa910_set_wake;
++      pax_close_kernel();
+ #endif
+ }
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index ae2a018..297ad08 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -156,7 +156,7 @@ exit:
+ /*
+  * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+- * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
++ * areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This is
+  * needed for the HW I/O coherency mechanism to work properly without
+  * deadlock.
+  */
+@@ -164,7 +164,7 @@ static void __iomem *
+ armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                        unsigned int mtype, void *caller)
+ {
+-      mtype = MT_UNCACHED;
++      mtype = MT_UNCACHED_RW;
+       return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+ }
+@@ -174,7 +174,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
+       coherency_cpu_base = of_iomap(np, 0);
+       arch_ioremap_caller = armada_wa_ioremap_caller;
+-      pci_ioremap_set_mem_type(MT_UNCACHED);
++      pci_ioremap_set_mem_type(MT_UNCACHED_RW);
+       /*
+        * We should switch the PL310 to I/O coherency mode only if
+diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
+index f39bd51..866c780 100644
+--- a/arch/arm/mach-mvebu/pmsu.c
++++ b/arch/arm/mach-mvebu/pmsu.c
+@@ -93,7 +93,7 @@
+ #define ARMADA_370_CRYPT0_ENG_ATTR     0x1
+ extern void ll_disable_coherency(void);
+-extern void ll_enable_coherency(void);
++extern int ll_enable_coherency(void);
+ extern void armada_370_xp_cpu_resume(void);
+ extern void armada_38x_cpu_resume(void);
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index b6443a4..20a0b74 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+ }
+ #endif
+-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+       .late_init = n8x0_menelaus_late_init,
+ };
+diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+index ad98246..69437a8 100644
+--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
++++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+@@ -88,7 +88,7 @@ struct cpu_pm_ops {
+       void (*resume)(void);
+       void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
+       void (*hotplug_restart)(void);
+-};
++} __no_const;
+ static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+ static struct powerdomain *mpuss_pd;
+@@ -106,7 +106,7 @@ static void dummy_cpu_resume(void)
+ static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
+ {}
+-static struct cpu_pm_ops omap_pm_ops = {
++static struct cpu_pm_ops omap_pm_ops __read_only = {
+       .finish_suspend         = default_finish_suspend,
+       .resume                 = dummy_cpu_resume,
+       .scu_prepare            = dummy_scu_prepare,
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index b4de3da..e027393 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -19,6 +19,7 @@
+ #include <linux/device.h>
+ #include <linux/smp.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <asm/smp_scu.h>
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index e920dd8..ef999171 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -530,7 +530,7 @@ void omap_device_delete(struct omap_device *od)
+ struct platform_device __init *omap_device_build(const char *pdev_name,
+                                                int pdev_id,
+                                                struct omap_hwmod *oh,
+-                                               void *pdata, int pdata_len)
++                                               const void *pdata, int pdata_len)
+ {
+       struct omap_hwmod *ohs[] = { oh };
+@@ -558,7 +558,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
+ struct platform_device __init *omap_device_build_ss(const char *pdev_name,
+                                                   int pdev_id,
+                                                   struct omap_hwmod **ohs,
+-                                                  int oh_cnt, void *pdata,
++                                                  int oh_cnt, const void *pdata,
+                                                   int pdata_len)
+ {
+       int ret = -ENOMEM;
+diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
+index 78c02b3..c94109a 100644
+--- a/arch/arm/mach-omap2/omap_device.h
++++ b/arch/arm/mach-omap2/omap_device.h
+@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
+ /* Core code interface */
+ struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
+-                                        struct omap_hwmod *oh, void *pdata,
++                                        struct omap_hwmod *oh, const void *pdata,
+                                         int pdata_len);
+ struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
+                                        struct omap_hwmod **oh, int oh_cnt,
+-                                       void *pdata, int pdata_len);
++                                       const void *pdata, int pdata_len);
+ struct omap_device *omap_device_alloc(struct platform_device *pdev,
+                                     struct omap_hwmod **ohs, int oh_cnt);
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 1052b29..54669b0 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -206,10 +206,10 @@ struct omap_hwmod_soc_ops {
+       void (*update_context_lost)(struct omap_hwmod *oh);
+       int (*get_context_lost)(struct omap_hwmod *oh);
+       int (*disable_direct_prcm)(struct omap_hwmod *oh);
+-};
++} __no_const;
+ /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
+-static struct omap_hwmod_soc_ops soc_ops;
++static struct omap_hwmod_soc_ops soc_ops __read_only;
+ /* omap_hwmod_list contains all registered struct omap_hwmods */
+ static LIST_HEAD(omap_hwmod_list);
+diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
+index 95fee54..b5dd79d 100644
+--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
++++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <asm/pgtable.h>
+ #include "powerdomain.h"
+@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
+ void __init am43xx_powerdomains_init(void)
+ {
+-      omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
++      pax_open_kernel();
++      const_cast(omap4_pwrdm_operations.pwrdm_has_voltdm) = am43xx_check_vcvp;
++      pax_close_kernel();
+       pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
+       pwrdm_register_pwrdms(powerdomains_am43xx);
+       pwrdm_complete_init();
+diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
+index ff0a68c..b312aa0 100644
+--- a/arch/arm/mach-omap2/wd_timer.c
++++ b/arch/arm/mach-omap2/wd_timer.c
+@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
+       struct omap_hwmod *oh;
+       char *oh_name = "wd_timer2";
+       char *dev_name = "omap_wdt";
+-      struct omap_wd_timer_platform_data pdata;
++      static struct omap_wd_timer_platform_data pdata = {
++              .read_reset_sources = prm_read_reset_sources
++      };
+       if (!cpu_class_is_omap2() || of_have_populated_dt())
+               return 0;
+@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
+               return -EINVAL;
+       }
+-      pdata.read_reset_sources = prm_read_reset_sources;
+-
+       pdev = omap_device_build(dev_name, id, oh, &pdata,
+                                sizeof(struct omap_wd_timer_platform_data));
+       WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
+diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+index 92ec8c3..3b09472 100644
+--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
++++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+@@ -240,7 +240,7 @@ static struct platform_device smdk6410_b_pwr_5v = {
+ };
+ #endif
+-static struct s3c_ide_platdata smdk6410_ide_pdata __initdata = {
++static const struct s3c_ide_platdata smdk6410_ide_pdata __initconst = {
+       .setup_gpio     = s3c64xx_ide_setup_gpio,
+ };
+diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
+index 0c6bb45..0f18d70 100644
+--- a/arch/arm/mach-shmobile/platsmp-apmu.c
++++ b/arch/arm/mach-shmobile/platsmp-apmu.c
+@@ -22,6 +22,7 @@
+ #include <asm/proc-fns.h>
+ #include <asm/smp_plat.h>
+ #include <asm/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+ #include "platsmp-apmu.h"
+ #include "rcar-gen2.h"
+@@ -316,6 +317,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
+ void __init shmobile_smp_apmu_suspend_init(void)
+ {
+-      shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++      pax_open_kernel();
++      const_cast(shmobile_suspend_ops.enter) = shmobile_smp_apmu_enter_suspend;
++      pax_close_kernel();
+ }
+ #endif
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index afcee04..63e52ac 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -178,7 +178,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
+       bool entered_lp2 = false;
+       if (tegra_pending_sgi())
+-              ACCESS_ONCE(abort_flag) = true;
++              ACCESS_ONCE_RW(abort_flag) = true;
+       cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
+index a69b22d..8523a03 100644
+--- a/arch/arm/mach-tegra/irq.c
++++ b/arch/arm/mach-tegra/irq.c
+@@ -20,6 +20,7 @@
+ #include <linux/cpu_pm.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/irq.h>
+ #include <linux/kernel.h>
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index 8538910..2f39bc4 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -10,6 +10,7 @@
+  */
+ #include <linux/kernel.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index 7cd9865..a00b6ab 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -24,6 +24,7 @@
+ #include <linux/io.h>
+ #include <asm/cacheflush.h>
+ #include <asm/smp_scu.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include "common.h"
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index d15a7fe..6cc4fc9 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -445,6 +445,7 @@ config CPU_32v5
+ config CPU_32v6
+       bool
++      select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       select TLS_REG_EMUL if !CPU_32v6K && !MMU
+ config CPU_32v6K
+@@ -599,6 +600,7 @@ config CPU_CP15_MPU
+ config CPU_USE_DOMAINS
+       bool
++      depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       help
+         This option enables or disables the use of domain switching
+         via the set_fs() function.
+@@ -809,7 +811,7 @@ config NEED_KUSER_HELPERS
+ config KUSER_HELPERS
+       bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+-      depends on MMU
++      depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
+       default y
+       help
+         Warning: disabling this option may break user programs.
+@@ -823,7 +825,7 @@ config KUSER_HELPERS
+         See Documentation/arm/kernel_user_helpers.txt for details.
+         However, the fixed address nature of these helpers can be used
+-        by ROP (return orientated programming) authors when creating
++        by ROP (Return Oriented Programming) authors when creating
+         exploits.
+         If all of the binaries and libraries which run on your platform
+@@ -838,7 +840,7 @@ config KUSER_HELPERS
+ config VDSO
+       bool "Enable VDSO for acceleration of some system calls"
+-      depends on AEABI && MMU && CPU_V7
++      depends on AEABI && MMU && CPU_V7 && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       default y if ARM_ARCH_TIMER
+       select GENERIC_TIME_VSYSCALL
+       help
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 7d5f4c7..c6a0816 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -778,6 +778,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+       u16 tinstr = 0;
+       int isize = 4;
+       int thumb2_32b = 0;
++      bool is_user_mode = user_mode(regs);
+       if (interrupts_enabled(regs))
+               local_irq_enable();
+@@ -786,14 +787,24 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+       if (thumb_mode(regs)) {
+               u16 *ptr = (u16 *)(instrptr & ~1);
+-              fault = probe_kernel_address(ptr, tinstr);
++              if (is_user_mode) {
++                      pax_open_userland();
++                      fault = probe_kernel_address(ptr, tinstr);
++                      pax_close_userland();
++              } else
++                      fault = probe_kernel_address(ptr, tinstr);
+               tinstr = __mem_to_opcode_thumb16(tinstr);
+               if (!fault) {
+                       if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+                           IS_T32(tinstr)) {
+                               /* Thumb-2 32-bit */
+                               u16 tinst2 = 0;
+-                              fault = probe_kernel_address(ptr + 1, tinst2);
++                              if (is_user_mode) {
++                                      pax_open_userland();
++                                      fault = probe_kernel_address(ptr + 1, tinst2);
++                                      pax_close_userland();
++                              } else
++                                      fault = probe_kernel_address(ptr + 1, tinst2);
+                               tinst2 = __mem_to_opcode_thumb16(tinst2);
+                               instr = __opcode_thumb32_compose(tinstr, tinst2);
+                               thumb2_32b = 1;
+@@ -803,7 +814,12 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+                       }
+               }
+       } else {
+-              fault = probe_kernel_address((void *)instrptr, instr);
++              if (is_user_mode) {
++                      pax_open_userland();
++                      fault = probe_kernel_address((void *)instrptr, instr);
++                      pax_close_userland();
++              } else
++                      fault = probe_kernel_address((void *)instrptr, instr);
+               instr = __mem_to_opcode_arm(instr);
+       }
+@@ -812,7 +828,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+               goto bad_or_fault;
+       }
+-      if (user_mode(regs))
++      if (is_user_mode)
+               goto user;
+       ai_sys += 1;
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index cc12905..88463b3 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -44,7 +44,7 @@ struct l2c_init_data {
+       void (*configure)(void __iomem *);
+       void (*unlock)(void __iomem *, unsigned);
+       struct outer_cache_fns outer_cache;
+-};
++} __do_const;
+ #define CACHE_LINE_SIZE               32
+diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
+index c8c8b9e..c55cc79 100644
+--- a/arch/arm/mm/context.c
++++ b/arch/arm/mm/context.c
+@@ -43,7 +43,7 @@
+ #define NUM_USER_ASIDS                ASID_FIRST_VERSION
+ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
++static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+ static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+ static DEFINE_PER_CPU(atomic64_t, active_asids);
+@@ -193,7 +193,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ {
+       static u32 cur_idx = 1;
+       u64 asid = atomic64_read(&mm->context.id);
+-      u64 generation = atomic64_read(&asid_generation);
++      u64 generation = atomic64_read_unchecked(&asid_generation);
+       if (asid != 0) {
+               u64 newasid = generation | (asid & ~ASID_MASK);
+@@ -225,7 +225,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+        */
+       asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+       if (asid == NUM_USER_ASIDS) {
+-              generation = atomic64_add_return(ASID_FIRST_VERSION,
++              generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
+                                                &asid_generation);
+               flush_context(cpu);
+               asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+@@ -254,14 +254,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+       cpu_set_reserved_ttbr0();
+       asid = atomic64_read(&mm->context.id);
+-      if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
++      if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
+           && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+               goto switch_mm_fastpath;
+       raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+       /* Check that our ASID belongs to the current generation. */
+       asid = atomic64_read(&mm->context.id);
+-      if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
++      if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
+               asid = new_context(mm, cpu);
+               atomic64_set(&mm->context.id, asid);
+       }
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 3a2e678..ebdbf80 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -25,6 +25,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/system_info.h>
+ #include <asm/tlbflush.h>
++#include <asm/sections.h>
+ #include "fault.h"
+@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+       if (fixup_exception(regs))
+               return;
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (addr < TASK_SIZE) {
++              if (current->signal->curr_ip)
++                      printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++              else
++                      printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++      }
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      if ((fsr & FSR_WRITE) &&
++          (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
++           (MODULES_VADDR <= addr && addr < MODULES_END)))
++      {
++              if (current->signal->curr_ip)
++                      printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++              else
++                      printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++      }
++#endif
++
+       /*
+        * No handler, we'll have to terminate things with extreme prejudice.
+        */
+@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+       }
+ #endif
++#ifdef CONFIG_PAX_PAGEEXEC
++      if ((tsk->mm->pax_flags & MF_PAX_PAGEEXEC) && (fsr & FSR_LNX_PF)) {
++              pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++              do_group_exit(SIGKILL);
++      }
++#endif
++
+       tsk->thread.address = addr;
+       tsk->thread.error_code = fsr;
+       tsk->thread.trap_no = 14;
+@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif                                        /* CONFIG_MMU */
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 20; i++) {
++              unsigned char c;
++              if (get_user(c, (__force unsigned char __user *)pc+i))
++                      printk(KERN_CONT "?? ");
++              else
++                      printk(KERN_CONT "%02x ", c);
++      }
++      printk("\n");
++
++      printk(KERN_ERR "PAX: bytes at SP-4: ");
++      for (i = -1; i < 20; i++) {
++              unsigned long c;
++              if (get_user(c, (__force unsigned long __user *)sp+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08lx ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * First Level Translation Fault Handler
+  *
+@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+       const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+       struct siginfo info;
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (addr < TASK_SIZE && is_domain_fault(fsr)) {
++              if (current->signal->curr_ip)
++                      printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++              else
++                      printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++              goto die;
++      }
++#endif
++
+       if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+               return;
++die:
+       pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+               inf->name, fsr, addr);
+       show_pte(current->mm, addr);
+@@ -574,15 +647,118 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+       ifsr_info[nr].name = name;
+ }
++asmlinkage int sys_sigreturn(struct pt_regs *regs);
++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
++
+ asmlinkage void __exception
+ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ {
+       const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+       struct siginfo info;
++      unsigned long pc = instruction_pointer(regs);
++
++      if (user_mode(regs)) {
++              unsigned long sigpage = current->mm->context.sigpage;
++
++              if (sigpage <= pc && pc < sigpage + 7*4) {
++                      if (pc < sigpage + 3*4)
++                              sys_sigreturn(regs);
++                      else
++                              sys_rt_sigreturn(regs);
++                      return;
++              }
++              if (pc == 0xffff0f60UL) {
++                      /*
++                       * PaX: __kuser_cmpxchg64 emulation
++                       */
++                      // TODO
++                      //regs->ARM_pc = regs->ARM_lr;
++                      //return;
++              }
++              if (pc == 0xffff0fa0UL) {
++                      /*
++                       * PaX: __kuser_memory_barrier emulation
++                       */
++                      // dmb(); implied by the exception
++                      regs->ARM_pc = regs->ARM_lr;
++#ifdef CONFIG_ARM_THUMB
++                      if (regs->ARM_lr & 1) {
++                              regs->ARM_cpsr |= PSR_T_BIT;
++                              regs->ARM_pc &= ~0x1U;
++                      } else
++                              regs->ARM_cpsr &= ~PSR_T_BIT;
++#endif
++                      return;
++              }
++              if (pc == 0xffff0fc0UL) {
++                      /*
++                       * PaX: __kuser_cmpxchg emulation
++                       */
++                      // TODO
++                      //long new;
++                      //int op;
++
++                      //op = FUTEX_OP_SET << 28;
++                      //new = futex_atomic_op_inuser(op, regs->ARM_r2);
++                      //regs->ARM_r0 = old != new;
++                      //regs->ARM_pc = regs->ARM_lr;
++                      //return;
++              }
++              if (pc == 0xffff0fe0UL) {
++                      /*
++                       * PaX: __kuser_get_tls emulation
++                       */
++                      regs->ARM_r0 = current_thread_info()->tp_value[0];
++                      regs->ARM_pc = regs->ARM_lr;
++#ifdef CONFIG_ARM_THUMB
++                      if (regs->ARM_lr & 1) {
++                              regs->ARM_cpsr |= PSR_T_BIT;
++                              regs->ARM_pc &= ~0x1U;
++                      } else
++                              regs->ARM_cpsr &= ~PSR_T_BIT;
++#endif
++                      return;
++              }
++      }
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
++              if (current->signal->curr_ip)
++                      printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++                                      pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++              else
++                      printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++                                      pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++              goto die;
++      }
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++      if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++#ifdef CONFIG_THUMB2_KERNEL
++              unsigned short bkpt;
++
++              if (!probe_kernel_address((const unsigned short *)pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
++#else
++              unsigned int bkpt;
++
++              if (!probe_kernel_address((const unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
++#endif
++                      current->thread.error_code = ifsr;
++                      current->thread.trap_no = 0;
++                      pax_report_refcount_error(regs, NULL);
++                      fixup_exception(regs);
++                      return;
++              }
++      }
++#endif
+       if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+               return;
++die:
+       pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+               inf->name, ifsr, addr);
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index 05ec5e0..0b70277 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -3,6 +3,7 @@
+ /*
+  * Fault status register encodings.  We steal bit 31 for our own purposes.
++ * Set when the FSR value is from an instruction fault.
+  */
+ #define FSR_LNX_PF            (1 << 31)
+ #define FSR_WRITE             (1 << 11)
+@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
+ }
+ #endif
++/* valid for LPAE and !LPAE */
++static inline int is_xn_fault(unsigned int fsr)
++{
++      return ((fsr_fs(fsr) & 0x3c) == 0xc);
++}
++
++static inline int is_domain_fault(unsigned int fsr)
++{
++      return ((fsr_fs(fsr) & 0xD) == 0x9);
++}
++
+ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ unsigned long search_exception_table(unsigned long addr);
+ void early_abt_enable(void);
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 370581a..b985cc1 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -747,7 +747,46 @@ void free_tcmmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+       extern char __tcm_start, __tcm_end;
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++      unsigned long addr;
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++      int cpu_arch = cpu_architecture();
++      unsigned int cr = get_cr();
++
++      if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
++              /* make pages tables, etc before .text NX */
++              for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++                      __section_update(pmd, addr, PMD_SECT_XN);
++              }
++              /* make init NX */
++              for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++                      __section_update(pmd, addr, PMD_SECT_XN);
++              }
++              /* make kernel code/rodata RX */
++              for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
++                      pgd = pgd_offset_k(addr);
++                      pud = pud_offset(pgd, addr);
++                      pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_ARM_LPAE
++                      __section_update(pmd, addr, PMD_SECT_RDONLY);
++#else
++                      __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
++#endif
++              }
++      }
++#endif
++
++#ifdef CONFIG_HAVE_TCM
+       poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+       free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+ #endif
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index ff0eed2..f17f1c9 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -411,9 +411,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
+       unsigned int mtype;
+       if (cached)
+-              mtype = MT_MEMORY_RWX;
++              mtype = MT_MEMORY_RX;
+       else
+-              mtype = MT_MEMORY_RWX_NONCACHED;
++              mtype = MT_MEMORY_RX_NONCACHED;
+       return __arm_ioremap_caller(phys_addr, size, mtype,
+                       __builtin_return_address(0));
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 66353ca..8aad9f8 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct vm_area_struct *vma;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       /*
+@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       if (len > TASK_SIZE)
+               return -ENOMEM;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -99,19 +103,21 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+-                      const unsigned long len, const unsigned long pgoff,
+-                      const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++                      unsigned long len, unsigned long pgoff,
++                      unsigned long flags)
+ {
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       /*
+@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               if (do_align)
+@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               else
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                              (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -182,14 +192,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
+ }
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 30fe03f..738d54e 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -243,7 +243,15 @@ __setup("noalign", noalign_setup);
+ #define PROT_PTE_S2_DEVICE    PROT_PTE_DEVICE
+ #define PROT_SECT_DEVICE      PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+-static struct mem_type mem_types[] = {
++#ifdef CONFIG_PAX_KERNEXEC
++#define L_PTE_KERNEXEC                L_PTE_RDONLY
++#define PMD_SECT_KERNEXEC     PMD_SECT_RDONLY
++#else
++#define L_PTE_KERNEXEC                L_PTE_DIRTY
++#define PMD_SECT_KERNEXEC     PMD_SECT_AP_WRITE
++#endif
++
++static struct mem_type mem_types[] __read_only = {
+       [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+                                 L_PTE_SHARED,
+@@ -272,19 +280,19 @@ static struct mem_type mem_types[] = {
+               .prot_sect      = PROT_SECT_DEVICE,
+               .domain         = DOMAIN_IO,
+       },
+-      [MT_UNCACHED] = {
++      [MT_UNCACHED_RW] = {
+               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_l1        = PMD_TYPE_TABLE,
+               .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
+               .domain         = DOMAIN_IO,
+       },
+-      [MT_CACHECLEAN] = {
+-              .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
++      [MT_CACHECLEAN_RO] = {
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+ #ifndef CONFIG_ARM_LPAE
+-      [MT_MINICLEAN] = {
+-              .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
++      [MT_MINICLEAN_RO] = {
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+ #endif
+@@ -300,7 +308,7 @@ static struct mem_type mem_types[] = {
+               .prot_l1   = PMD_TYPE_TABLE,
+               .domain    = DOMAIN_VECTORS,
+       },
+-      [MT_MEMORY_RWX] = {
++      [__MT_MEMORY_RWX] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+@@ -313,17 +321,30 @@ static struct mem_type mem_types[] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
+-      [MT_ROM] = {
+-              .prot_sect = PMD_TYPE_SECT,
++      [MT_MEMORY_RX] = {
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
++              .prot_l1   = PMD_TYPE_TABLE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++              .domain    = DOMAIN_KERNEL,
++      },
++      [MT_ROM_RX] = {
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+               .domain    = DOMAIN_KERNEL,
+       },
+-      [MT_MEMORY_RWX_NONCACHED] = {
++      [MT_MEMORY_RW_NONCACHED] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
++      [MT_MEMORY_RX_NONCACHED] = {
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
++                              L_PTE_MT_BUFFERABLE,
++              .prot_l1   = PMD_TYPE_TABLE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++              .domain    = DOMAIN_KERNEL,
++      },
+       [MT_MEMORY_RW_DTCM] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_XN,
+@@ -331,9 +352,10 @@ static struct mem_type mem_types[] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+               .domain    = DOMAIN_KERNEL,
+       },
+-      [MT_MEMORY_RWX_ITCM] = {
+-              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
++      [MT_MEMORY_RX_ITCM] = {
++              .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+               .domain    = DOMAIN_KERNEL,
+       },
+       [MT_MEMORY_RW_SO] = {
+@@ -586,9 +608,14 @@ static void __init build_mem_type_table(void)
+                * Mark cache clean areas and XIP ROM read only
+                * from SVC mode and no access from userspace.
+                */
+-              mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+-              mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+-              mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++              mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#ifdef CONFIG_PAX_KERNEXEC
++              mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++              mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++              mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#endif
++              mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++              mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ #endif
+               /*
+@@ -605,13 +632,17 @@ static void __init build_mem_type_table(void)
+                       mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+-                      mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+-                      mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
++                      mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
++                      mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+-                      mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+-                      mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
++                      mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
++                      mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
+               }
+       }
+@@ -622,15 +653,20 @@ static void __init build_mem_type_table(void)
+       if (cpu_arch >= CPU_ARCH_ARMv6) {
+               if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                       /* Non-cacheable Normal is XCB = 001 */
+-                      mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++                      mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++                              PMD_SECT_BUFFERED;
++                      mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+                               PMD_SECT_BUFFERED;
+               } else {
+                       /* For both ARMv6 and non-TEX-remapping ARMv7 */
+-                      mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++                      mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++                              PMD_SECT_TEX(1);
++                      mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+                               PMD_SECT_TEX(1);
+               }
+       } else {
+-              mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++              mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++              mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+       }
+ #ifdef CONFIG_ARM_LPAE
+@@ -651,6 +687,8 @@ static void __init build_mem_type_table(void)
+       user_pgprot |= PTE_EXT_PXN;
+ #endif
++      user_pgprot |= __supported_pte_mask;
++
+       for (i = 0; i < 16; i++) {
+               pteval_t v = pgprot_val(protection_map[i]);
+               protection_map[i] = __pgprot(v | user_pgprot);
+@@ -668,21 +706,24 @@ static void __init build_mem_type_table(void)
+       mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+       mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+-      mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+-      mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
++      mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
++      mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
++      mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
++      mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+-      mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
+-      mem_types[MT_ROM].prot_sect |= cp->pmd;
++      mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
++      mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
++      mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
+       switch (cp->pmd) {
+       case PMD_SECT_WT:
+-              mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
++              mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
+               break;
+       case PMD_SECT_WB:
+       case PMD_SECT_WBWA:
+-              mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
++              mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
+               break;
+       }
+       pr_info("Memory policy: %sData cache %s\n",
+@@ -959,7 +1000,7 @@ static void __init create_mapping(struct map_desc *md)
+               return;
+       }
+-      if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
++      if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
+           md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+           (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+               pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+@@ -1320,18 +1361,15 @@ void __init arm_mm_memblock_reserve(void)
+  * Any other function or debugging method which may touch any device _will_
+  * crash the kernel.
+  */
++
++static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
++
+ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ {
+       struct map_desc map;
+       unsigned long addr;
+-      void *vectors;
+-      /*
+-       * Allocate the vector page early.
+-       */
+-      vectors = early_alloc(PAGE_SIZE * 2);
+-
+-      early_trap_init(vectors);
++      early_trap_init(&vectors);
+       /*
+        * Clear page table except top pmd used by early fixmaps
+@@ -1347,7 +1385,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+       map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+       map.virtual = MODULES_VADDR;
+       map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+-      map.type = MT_ROM;
++      map.type = MT_ROM_RX;
+       create_mapping(&map);
+ #endif
+@@ -1358,14 +1396,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+       map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+       map.virtual = FLUSH_BASE;
+       map.length = SZ_1M;
+-      map.type = MT_CACHECLEAN;
++      map.type = MT_CACHECLEAN_RO;
+       create_mapping(&map);
+ #endif
+ #ifdef FLUSH_BASE_MINICACHE
+       map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+       map.virtual = FLUSH_BASE_MINICACHE;
+       map.length = SZ_1M;
+-      map.type = MT_MINICLEAN;
++      map.type = MT_MINICLEAN_RO;
+       create_mapping(&map);
+ #endif
+@@ -1374,7 +1412,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+        * location (0xffff0000).  If we aren't using high-vectors, also
+        * create a mapping at the low-vectors virtual address.
+        */
+-      map.pfn = __phys_to_pfn(virt_to_phys(vectors));
++      map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
+       map.virtual = 0xffff0000;
+       map.length = PAGE_SIZE;
+ #ifdef CONFIG_KUSER_HELPERS
+@@ -1437,12 +1475,14 @@ static void __init kmap_init(void)
+ static void __init map_lowmem(void)
+ {
+       struct memblock_region *reg;
++#ifndef CONFIG_PAX_KERNEXEC
+ #ifdef CONFIG_XIP_KERNEL
+       phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
+ #else
+       phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ #endif
+       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
++#endif
+       /* Map all the lowmem memory banks. */
+       for_each_memblock(memory, reg) {
+@@ -1458,11 +1498,48 @@ static void __init map_lowmem(void)
+               if (start >= end)
+                       break;
++#ifdef CONFIG_PAX_KERNEXEC
++              map.pfn = __phys_to_pfn(start);
++              map.virtual = __phys_to_virt(start);
++              map.length = end - start;
++
++              if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
++                      struct map_desc kernel;
++                      struct map_desc initmap;
++
++                      /* when freeing initmem we will make this RW */
++                      initmap.pfn = __phys_to_pfn(__pa(__init_begin));
++                      initmap.virtual = (unsigned long)__init_begin;
++                      initmap.length = _sdata - __init_begin;
++                      initmap.type = __MT_MEMORY_RWX;
++                      create_mapping(&initmap);
++
++                      /* when freeing initmem we will make this RX */
++                      kernel.pfn = __phys_to_pfn(__pa(_stext));
++                      kernel.virtual = (unsigned long)_stext;
++                      kernel.length = __init_begin - _stext;
++                      kernel.type = __MT_MEMORY_RWX;
++                      create_mapping(&kernel);
++
++                      if (map.virtual < (unsigned long)_stext) {
++                              map.length = (unsigned long)_stext - map.virtual;
++                              map.type = __MT_MEMORY_RWX;
++                              create_mapping(&map);
++                      }
++
++                      map.pfn = __phys_to_pfn(__pa(_sdata));
++                      map.virtual = (unsigned long)_sdata;
++                      map.length = end - __pa(_sdata);
++              }
++
++              map.type = MT_MEMORY_RW;
++              create_mapping(&map);
++#else
+               if (end < kernel_x_start) {
+                       map.pfn = __phys_to_pfn(start);
+                       map.virtual = __phys_to_virt(start);
+                       map.length = end - start;
+-                      map.type = MT_MEMORY_RWX;
++                      map.type = __MT_MEMORY_RWX;
+                       create_mapping(&map);
+               } else if (start >= kernel_x_end) {
+@@ -1486,7 +1563,7 @@ static void __init map_lowmem(void)
+                       map.pfn = __phys_to_pfn(kernel_x_start);
+                       map.virtual = __phys_to_virt(kernel_x_start);
+                       map.length = kernel_x_end - kernel_x_start;
+-                      map.type = MT_MEMORY_RWX;
++                      map.type = __MT_MEMORY_RWX;
+                       create_mapping(&map);
+@@ -1499,6 +1576,7 @@ static void __init map_lowmem(void)
+                               create_mapping(&map);
+                       }
+               }
++#endif
+       }
+ }
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 93d0b6d..2db6d99 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -20,6 +20,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/hwcap.h>
+ #include <asm/opcodes.h>
++#include <asm/pgtable.h>
+ #include "bpf_jit_32.h"
+@@ -72,54 +73,38 @@ struct jit_ctx {
+ #endif
+ };
++#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
++int bpf_jit_enable __read_only;
++#else
+ int bpf_jit_enable __read_mostly;
++#endif
+-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+-                    unsigned int size)
+-{
+-      void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+-
+-      if (!ptr)
+-              return -EFAULT;
+-      memcpy(ret, ptr, size);
+-      return 0;
+-}
+-
+-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+ {
+       u8 ret;
+       int err;
+-      if (offset < 0)
+-              err = call_neg_helper(skb, offset, &ret, 1);
+-      else
+-              err = skb_copy_bits(skb, offset, &ret, 1);
++      err = skb_copy_bits(skb, offset, &ret, 1);
+       return (u64)err << 32 | ret;
+ }
+-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+ {
+       u16 ret;
+       int err;
+-      if (offset < 0)
+-              err = call_neg_helper(skb, offset, &ret, 2);
+-      else
+-              err = skb_copy_bits(skb, offset, &ret, 2);
++      err = skb_copy_bits(skb, offset, &ret, 2);
+       return (u64)err << 32 | ntohs(ret);
+ }
+-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+ {
+       u32 ret;
+       int err;
+-      if (offset < 0)
+-              err = call_neg_helper(skb, offset, &ret, 4);
+-      else
+-              err = skb_copy_bits(skb, offset, &ret, 4);
++      err = skb_copy_bits(skb, offset, &ret, 4);
+       return (u64)err << 32 | ntohl(ret);
+ }
+@@ -191,8 +176,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+ {
+       u32 *ptr;
+       /* We are guaranteed to have aligned memory. */
++      pax_open_kernel();
+       for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+               *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
++      pax_close_kernel();
+ }
+ static void build_prologue(struct jit_ctx *ctx)
+@@ -554,6 +541,9 @@ static int build_body(struct jit_ctx *ctx)
+               case BPF_LD | BPF_B | BPF_ABS:
+                       load_order = 0;
+ load:
++                      /* the interpreter will deal with the negative K */
++                      if ((int)k < 0)
++                              return -ENOTSUPP;
+                       emit_mov_i(r_off, k, ctx);
+ load_common:
+                       ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -568,18 +558,6 @@ load_common:
+                               condt = ARM_COND_HI;
+                       }
+-                      /*
+-                       * test for negative offset, only if we are
+-                       * currently scheduled to take the fast
+-                       * path. this will update the flags so that
+-                       * the slowpath instruction are ignored if the
+-                       * offset is negative.
+-                       *
+-                       * for loard_order == 0 the HI condition will
+-                       * make loads at offset 0 take the slow path too.
+-                       */
+-                      _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+-
+                       _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+                             ctx);
+diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
+index 8151bde..9be301f 100644
+--- a/arch/arm/plat-iop/setup.c
++++ b/arch/arm/plat-iop/setup.c
+@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
+               .virtual        = IOP3XX_PERIPHERAL_VIRT_BASE,
+               .pfn            = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
+               .length         = IOP3XX_PERIPHERAL_SIZE,
+-              .type           = MT_UNCACHED,
++              .type           = MT_UNCACHED_RW,
+       },
+ };
+diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
+index a5bc92d..0bb4730 100644
+--- a/arch/arm/plat-omap/sram.c
++++ b/arch/arm/plat-omap/sram.c
+@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
+        * Looks like we need to preserve some bootloader code at the
+        * beginning of SRAM for jumping to flash for reboot to work...
+        */
++      pax_open_kernel();
+       memset_io(omap_sram_base + omap_sram_skip, 0,
+                 omap_sram_size - omap_sram_skip);
++      pax_close_kernel();
+ }
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index bc3f00f..88ded6a 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -891,6 +891,7 @@ config RELOCATABLE
+ config RANDOMIZE_BASE
+       bool "Randomize the address of the kernel image"
++      depends on BROKEN_SECURITY
+       select ARM64_MODULE_PLTS if MODULES
+       select RELOCATABLE
+       help
+diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
+index 0cc758c..de67415 100644
+--- a/arch/arm64/Kconfig.debug
++++ b/arch/arm64/Kconfig.debug
+@@ -6,6 +6,7 @@ config ARM64_PTDUMP
+       bool "Export kernel pagetable layout to userspace via debugfs"
+       depends on DEBUG_KERNEL
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+         help
+         Say Y here if you want to show the kernel pagetable layout in a
+         debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index aefda98..2937874 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -29,7 +29,7 @@ struct sha1_ce_state {
+       u32                     finalize;
+ };
+-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
++asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+                                 int blocks);
+ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+@@ -39,8 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+       sctx->finalize = 0;
+       kernel_neon_begin_partial(16);
+-      sha1_base_do_update(desc, data, len,
+-                          (sha1_block_fn *)sha1_ce_transform);
++      sha1_base_do_update(desc, data, len, sha1_ce_transform);
+       kernel_neon_end();
+       return 0;
+@@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+       sctx->finalize = finalize;
+       kernel_neon_begin_partial(16);
+-      sha1_base_do_update(desc, data, len,
+-                          (sha1_block_fn *)sha1_ce_transform);
++      sha1_base_do_update(desc, data, len, sha1_ce_transform);
+       if (!finalize)
+-              sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
++              sha1_base_do_finalize(desc, sha1_ce_transform);
+       kernel_neon_end();
+       return sha1_base_finish(desc, out);
+ }
+@@ -78,7 +76,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
+       sctx->finalize = 0;
+       kernel_neon_begin_partial(16);
+-      sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
++      sha1_base_do_finalize(desc, sha1_ce_transform);
+       kernel_neon_end();
+       return sha1_base_finish(desc, out);
+ }
+diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
+index c0235e0..86eb684 100644
+--- a/arch/arm64/include/asm/atomic.h
++++ b/arch/arm64/include/asm/atomic.h
+@@ -57,11 +57,13 @@
+ #define atomic_set(v, i)              WRITE_ONCE(((v)->counter), (i))
+ #define atomic_add_return_relaxed     atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed   atomic_add_return_relaxed
+ #define atomic_add_return_acquire     atomic_add_return_acquire
+ #define atomic_add_return_release     atomic_add_return_release
+ #define atomic_add_return             atomic_add_return
+ #define atomic_inc_return_relaxed(v)  atomic_add_return_relaxed(1, (v))
++#define atomic_inc_return_unchecked_relaxed(v)        atomic_add_return_relaxed(1, (v))
+ #define atomic_inc_return_acquire(v)  atomic_add_return_acquire(1, (v))
+ #define atomic_inc_return_release(v)  atomic_add_return_release(1, (v))
+ #define atomic_inc_return(v)          atomic_add_return(1, (v))
+@@ -128,6 +130,8 @@
+ #define __atomic_add_unless(v, a, u)  ___atomic_add_unless(v, a, u,)
+ #define atomic_andnot                 atomic_andnot
++#define atomic_inc_return_unchecked_relaxed(v)        atomic_add_return_relaxed(1, (v))
++
+ /*
+  * 64-bit atomic operations.
+  */
+@@ -206,5 +210,16 @@
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1, 0)
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++#define atomic64_xchg_unchecked(v, n)         atomic64_xchg((v), (n))
++
+ #endif
+ #endif
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5082b30..9ef38c2 100644
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -16,10 +16,14 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
++#include <linux/const.h>
++
+ #include <asm/cachetype.h>
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT                7
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 5394c84..05e5a95 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -123,16 +123,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+       switch (size) {
+       case 1:
+-              ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
++              ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
+               break;
+       case 2:
+-              ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
++              ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
+               break;
+       case 4:
+-              ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
++              ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
+               break;
+       case 8:
+-              ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
++              ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
+               break;
+       default:
+               BUILD_BUG();
+diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
+index d25f4f1..61d52da 100644
+--- a/arch/arm64/include/asm/pgalloc.h
++++ b/arch/arm64/include/asm/pgalloc.h
+@@ -51,6 +51,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+       __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
+ #else
+ static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+ {
+@@ -80,6 +85,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ {
+       __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+ }
++
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++      pgd_populate(mm, pgd, pud);
++}
+ #else
+ static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+ {
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index e20bd43..7e476da 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -23,6 +23,9 @@
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/pgtable-prot.h>
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ /*
+  * VMALLOC range.
+  *
+@@ -718,6 +721,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ #define kc_vaddr_to_offset(v) ((v) & ~VA_START)
+ #define kc_offset_to_vaddr(o) ((o) | VA_START)
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __ASM_PGTABLE_H */
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index ace0a96..c7c4d3c 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -194,4 +194,11 @@ void cpu_enable_pan(void *__unused);
+ void cpu_enable_uao(void *__unused);
+ void cpu_enable_cache_maint_trap(void *__unused);
++#ifdef CONFIG_PAX_RAP
++static inline void pax_reload_rap_cookie(unsigned long *rap_cookie)
++{
++      asm volatile("mov\tx19, %0\n\t" : : "r"(*rap_cookie) : "r19");
++}
++#endif
++
+ #endif /* __ASM_PROCESSOR_H */
+diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
+index 2eb714c..3a10471 100644
+--- a/arch/arm64/include/asm/string.h
++++ b/arch/arm64/include/asm/string.h
+@@ -17,40 +17,40 @@
+ #define __ASM_STRING_H
+ #define __HAVE_ARCH_STRRCHR
+-extern char *strrchr(const char *, int c);
++extern char *strrchr(const char *, int c) __nocapture(-1);
+ #define __HAVE_ARCH_STRCHR
+-extern char *strchr(const char *, int c);
++extern char *strchr(const char *, int c) __nocapture(-1);
+ #define __HAVE_ARCH_STRCMP
+-extern int strcmp(const char *, const char *);
++extern int strcmp(const char *, const char *) __nocapture();
+ #define __HAVE_ARCH_STRNCMP
+-extern int strncmp(const char *, const char *, __kernel_size_t);
++extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2);
+ #define __HAVE_ARCH_STRLEN
+-extern __kernel_size_t strlen(const char *);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
+ #define __HAVE_ARCH_STRNLEN
+-extern __kernel_size_t strnlen(const char *, __kernel_size_t);
++extern __kernel_size_t strnlen(const char *, __kernel_size_t) __nocapture(1);
+ #define __HAVE_ARCH_MEMCPY
+-extern void *memcpy(void *, const void *, __kernel_size_t);
+-extern void *__memcpy(void *, const void *, __kernel_size_t);
++extern void *memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
++extern void *__memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
+ #define __HAVE_ARCH_MEMMOVE
+-extern void *memmove(void *, const void *, __kernel_size_t);
+-extern void *__memmove(void *, const void *, __kernel_size_t);
++extern void *memmove(void *, const void *, __kernel_size_t) __nocapture(2);
++extern void *__memmove(void *, const void *, __kernel_size_t) __nocapture(2);
+ #define __HAVE_ARCH_MEMCHR
+-extern void *memchr(const void *, int, __kernel_size_t);
++extern void *memchr(const void *, int, __kernel_size_t) __nocapture(-1);
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, __kernel_size_t);
+ extern void *__memset(void *, int, __kernel_size_t);
+ #define __HAVE_ARCH_MEMCMP
+-extern int memcmp(const void *, const void *, size_t);
++extern int memcmp(const void *, const void *, size_t) __nocapture(1, 2);
+ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index db84983..d256a3edc 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -110,6 +110,7 @@ static inline void set_fs(mm_segment_t fs)
+  */
+ #define untagged_addr(addr)           sign_extend64(addr, 55)
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size)   __range_ok(addr, size)
+ #define user_addr_max                 get_fs
+@@ -279,6 +280,9 @@ static inline unsigned long __must_check __copy_from_user(void *to, const void _
+ static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       kasan_check_read(from, n);
+       check_object_size(from, n, true);
+       return __arch_copy_to_user(to, from, n);
+@@ -286,6 +290,9 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       kasan_check_write(to, n);
+       if (access_ok(VERIFY_READ, from, n)) {
+@@ -298,6 +305,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       kasan_check_read(from, n);
+       if (access_ok(VERIFY_WRITE, to, n)) {
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index 65d81f9..6a46f09 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -166,7 +166,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore);
+ static int create_safe_exec_page(void *src_start, size_t length,
+                                unsigned long dst_addr,
+                                phys_addr_t *phys_dst_addr,
+-                               void *(*allocator)(gfp_t mask),
++                               unsigned long (*allocator)(gfp_t mask),
+                                gfp_t mask)
+ {
+       int rc = 0;
+@@ -174,7 +174,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+-      unsigned long dst = (unsigned long)allocator(mask);
++      unsigned long dst = allocator(mask);
+       if (!dst) {
+               rc = -ENOMEM;
+@@ -184,9 +184,9 @@ static int create_safe_exec_page(void *src_start, size_t length,
+       memcpy((void *)dst, src_start, length);
+       flush_icache_range(dst, dst + length);
+-      pgd = pgd_offset_raw(allocator(mask), dst_addr);
++      pgd = pgd_offset_raw((pgd_t *)allocator(mask), dst_addr);
+       if (pgd_none(*pgd)) {
+-              pud = allocator(mask);
++              pud = (pud_t *)allocator(mask);
+               if (!pud) {
+                       rc = -ENOMEM;
+                       goto out;
+@@ -196,7 +196,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+       pud = pud_offset(pgd, dst_addr);
+       if (pud_none(*pud)) {
+-              pmd = allocator(mask);
++              pmd = (pmd_t *)allocator(mask);
+               if (!pmd) {
+                       rc = -ENOMEM;
+                       goto out;
+@@ -206,7 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+       pmd = pmd_offset(pud, dst_addr);
+       if (pmd_none(*pmd)) {
+-              pte = allocator(mask);
++              pte = (pte_t *)allocator(mask);
+               if (!pte) {
+                       rc = -ENOMEM;
+                       goto out;
+@@ -449,7 +449,7 @@ int swsusp_arch_resume(void)
+       rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+                                  (unsigned long)hibernate_exit,
+                                  &phys_hibernate_exit,
+-                                 (void *)get_safe_page, GFP_ATOMIC);
++                                 get_safe_page, GFP_ATOMIC);
+       if (rc) {
+               pr_err("Failed to create safe executable page for hibernate_exit code.");
+               goto out;
+diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
+index 37e47a9..f8597fc 100644
+--- a/arch/arm64/kernel/probes/decode-insn.c
++++ b/arch/arm64/kernel/probes/decode-insn.c
+@@ -157,10 +157,10 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+               mod = __module_address((unsigned long)addr);
+               if (mod && within_module_init((unsigned long)addr, mod) &&
+                       !within_module_init((unsigned long)scan_end, mod))
+-                      scan_end = (kprobe_opcode_t *)mod->init_layout.base;
++                      scan_end = (kprobe_opcode_t *)mod->init_layout.base_rx;
+               else if (mod && within_module_core((unsigned long)addr, mod) &&
+                       !within_module_core((unsigned long)scan_end, mod))
+-                      scan_end = (kprobe_opcode_t *)mod->core_layout.base;
++                      scan_end = (kprobe_opcode_t *)mod->core_layout.base_rx;
+               preempt_enable();
+       }
+ #endif
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 6cd2612..56d72e5c 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
+ /*
+  * Function pointers to optional machine specific functions
+  */
+-void (*pm_power_off)(void);
++void (* pm_power_off)(void);
+ EXPORT_SYMBOL_GPL(pm_power_off);
+ void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+@@ -109,7 +109,7 @@ void machine_shutdown(void)
+  * activity (executing tasks, handling interrupts). smp_send_stop()
+  * achieves this.
+  */
+-void machine_halt(void)
++void __noreturn machine_halt(void)
+ {
+       local_irq_disable();
+       smp_send_stop();
+@@ -122,12 +122,13 @@ void machine_halt(void)
+  * achieves this. When the system power is turned off, it will take all CPUs
+  * with it.
+  */
+-void machine_power_off(void)
++void __noreturn machine_power_off(void)
+ {
+       local_irq_disable();
+       smp_send_stop();
+       if (pm_power_off)
+               pm_power_off();
++      while(1);
+ }
+ /*
+@@ -139,7 +140,7 @@ void machine_power_off(void)
+  * executing pre-reset code, and using RAM that the primary CPU's code wishes
+  * to use. Implementing such co-ordination would be essentially impossible.
+  */
+-void machine_restart(char *cmd)
++void __noreturn machine_restart(char *cmd)
+ {
+       /* Disable interrupts first */
+       local_irq_disable();
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index d34fd72..8b6faee 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -95,8 +95,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+               struct pt_regs *irq_args;
+               unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+-              if (object_is_on_stack((void *)orig_sp) &&
+-                 object_is_on_stack((void *)frame->fp)) {
++              if (object_starts_on_stack((void *)orig_sp) &&
++                 object_starts_on_stack((void *)frame->fp)) {
+                       frame->sp = orig_sp;
+                       /* orig_sp is the saved pt_regs, find the elr */
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 771a01a7f..db6d9cc 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -511,7 +511,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
+                       __show_regs(regs);
+       }
+-      return sys_ni_syscall();
++      return -ENOSYS;
+ }
+ static const char *esr_class_str[] = {
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index 0388ece..87c8df1 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+-#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN    15
++#define PAX_DELTA_STACK_LEN   15
++#endif
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index 479330b..53717a8 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define __ASM_AVR32_KMAP_TYPES_H
+ #ifdef CONFIG_DEBUG_HIGHMEM
+-# define KM_TYPE_NR 29
++# define KM_TYPE_NR 30
+ #else
+-# define KM_TYPE_NR 14
++# define KM_TYPE_NR 15
+ #endif
+ #endif /* __ASM_AVR32_KMAP_TYPES_H */
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index a4b7eda..d057f9e 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+ int exception_trace = 1;
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 20; i++) {
++              unsigned char c;
++              if (get_user(c, (unsigned char *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%02x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * This routine handles page faults. It determines the address and the
+  * problem, and then passes it off to one of the appropriate routines.
+@@ -178,6 +195,16 @@ bad_area:
+       up_read(&mm->mmap_sem);
+       if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++                      if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++                              pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++                              do_group_exit(SIGKILL);
++                      }
++              }
++#endif
++
+               if (exception_trace && printk_ratelimit())
+                       printk("%s%s[%d]: segfault at %08lx pc %08lx "
+                              "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
+index f3337ee..15b6f8d 100644
+--- a/arch/blackfin/Kconfig.debug
++++ b/arch/blackfin/Kconfig.debug
+@@ -18,6 +18,7 @@ config DEBUG_VERBOSE
+ config DEBUG_MMRS
+       tristate "Generate Blackfin MMR tree"
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       help
+         Create a tree of Blackfin MMRs via the debugfs tree.  If
+         you enable this, you will find all MMRs laid out in the
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
++#include <linux/const.h>
+ #include <linux/linkage.h>    /* for asmlinkage */
+ /*
+@@ -14,7 +15,7 @@
+  * Blackfin loads 32 bytes for cache
+  */
+ #define L1_CACHE_SHIFT        5
+-#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES        (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES       L1_CACHE_BYTES
+ #define ARCH_DMA_MINALIGN     L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 7caf25d..ee65ac5 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index 1c2a5e2..2579e5f 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -146,6 +146,16 @@ static inline void atomic64_dec(atomic64_t *v)
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new)         (__xchg_64(new, &(v)->counter))
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+       int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401df9 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
++#include <linux/const.h>
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT                (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __cacheline_aligned   __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index 43901f2..0d8b865 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -2,6 +2,6 @@
+ #ifndef _ASM_KMAP_TYPES_H
+ #define _ASM_KMAP_TYPES_H
+-#define KM_TYPE_NR 17
++#define KM_TYPE_NR 18
+ #endif
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 836f1470..4cf23f5 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(current->mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       goto success;
+       }
+@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       info.high_limit = (current->mm->start_stack - 0x00200000);
+       info.align_mask = 0;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               goto success;
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index 69952c18..4fa2908 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT                (5)
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT                5
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define ARCH_DMA_MINALIGN     L1_CACHE_BYTES
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index 18ca6a9..77b0e0d 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -519,6 +519,7 @@ config KEXEC
+       bool "kexec system call"
+       depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+       select KEXEC_CORE
++      depends on !GRKERNSEC_KMEM
+       help
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
+index c100d78..07538cc 100644
+--- a/arch/ia64/Makefile
++++ b/arch/ia64/Makefile
+@@ -98,5 +98,6 @@ endef
+ archprepare: make_nr_irqs_h
+ PHONY += make_nr_irqs_h
++make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ make_nr_irqs_h:
+       $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index f565ad3..484af46 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -307,4 +307,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v)                       atomic64_add(1, (v))
+ #define atomic64_dec(v)                       atomic64_sub(1, (v))
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_IA64_ATOMIC_H */
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
++#include <linux/const.h>
+ /*
+  * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+ /* Bytes per L1 (data) cache line.  */
+ #define L1_CACHE_SHIFT                CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT      L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index 5a83c5c..4d7f553 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+  */
+ #define ELF_ET_DYN_BASE               (TASK_UNMAPPED_BASE + 0x800000000UL)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN    (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN   (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND               0x70000001
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index f5e70e9..624fad5 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+       pgd_val(*pgd_entry) = __pa(pud);
+ }
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++      pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+       pud_val(*pud_entry) = __pa(pmd);
+ }
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++      pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 9f3ed9e..c99b418 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+  *    David Mosberger-Tang <davidm@hpl.hp.com>
+  */
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -139,6 +139,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY     __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC        __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC   __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC     __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC   PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC     PAGE_COPY
++#endif
++
+ #define PAGE_GATE     __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL   __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index ca9e761..40dffaf 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -73,7 +73,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+       unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
+       asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+-      ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++      ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index bfe1319..da0014b 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -70,6 +70,7 @@
+        && ((segment).seg == KERNEL_DS.seg                                             \
+            || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));        \
+ })
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size)   __access_ok((addr), (size), get_fs())
+ /*
+@@ -241,17 +242,23 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++      if (count > INT_MAX)
++              return count;
++
+       check_object_size(from, count, true);
+-      return __copy_user(to, (__force void __user *) from, count);
++      return __copy_user(to, (void __force_user *) from, count);
+ }
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++      if (count > INT_MAX)
++              return count;
++
+       check_object_size(to, count, false);
+-      return __copy_user((__force void __user *) to, from, count);
++      return __copy_user((void __force_user *) to, from, count);
+ }
+ #define __copy_to_user_inatomic               __copy_to_user
+@@ -260,11 +267,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({                                                                                    \
+       void __user *__cu_to = (to);                                                    \
+       const void *__cu_from = (from);                                                 \
+-      long __cu_len = (n);                                                            \
++      unsigned long __cu_len = (n);                                                   \
+                                                                                       \
+-      if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
+-              check_object_size(__cu_from, __cu_len, true);                   \
+-              __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
++      if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) {          \
++              check_object_size(__cu_from, __cu_len, true);                           \
++              __cu_len = __copy_user(__cu_to, (void __force_user *)  __cu_from, __cu_len);    \
+       }                                                                               \
+       __cu_len;                                                                       \
+ })
+@@ -272,10 +279,10 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-      check_object_size(to, n, false);
+-      if (likely(__access_ok(from, n, get_fs())))
+-              n = __copy_user((__force void __user *) to, from, n);
+-      else
++      if (likely(__access_ok(from, n, get_fs()))) {
++              check_object_size(to, n, false);
++              n = __copy_user((void __force_user *) to, from, n);
++      } else if ((long)n > 0)
+               memset(to, 0, n);
+       return n;
+ }
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 6ab0ae7..88f1b60 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+-      return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
++      return within_module_init(addr, mod);
+ }
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+-      return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
++      return within_module_core(addr, mod);
+ }
+ static inline int
+@@ -676,6 +676,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+             case RV_BDREL:
+               val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
++              if (within_module_rx(val, &mod->init_layout))
++                      val -= mod->init_layout.base_rx;
++              else if (within_module_rw(val, &mod->init_layout))
++                      val -= mod->init_layout.base_rw;
++              else if (within_module_rx(val, &mod->core_layout))
++                      val -= mod->core_layout.base_rx;
++              else if (within_module_rw(val, &mod->core_layout))
++                      val -= mod->core_layout.base_rw;
+               break;
+             case RV_LTV:
+@@ -810,15 +818,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+                *     addresses have been selected...
+                */
+               uint64_t gp;
+-              if (mod->core_layout.size > MAX_LTOFF)
++              if (mod->core_layout.size_rx + mod->core_layout.size_rw > MAX_LTOFF)
+                       /*
+                        * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+                        * at the end of the module.
+                        */
+-                      gp = mod->core_layout.size - MAX_LTOFF / 2;
++                      gp = mod->core_layout.size_rx + mod->core_layout.size_rw - MAX_LTOFF / 2;
+               else
+-                      gp = mod->core_layout.size / 2;
+-              gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
++                      gp = (mod->core_layout.size_rx + mod->core_layout.size_rw) / 2;
++              gp = (uint64_t) mod->core_layout.base_rx + ((gp + 7) & -8);
+               mod->arch.gp = gp;
+               DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+       }
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index c39c3cd..3c77738 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata palinfo_cpu_notifier =
++static struct notifier_block palinfo_cpu_notifier =
+ {
+       .notifier_call = palinfo_cpu_callback,
+       .priority = 0,
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 41e33f8..65180b2a 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+       unsigned long align_mask = 0;
+       struct mm_struct *mm = current->mm;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       if (len > RGN_MAP_LIMIT)
+               return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+       if (REGION_NUMBER(addr) == RGN_HPAGE)
+               addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              addr = mm->free_area_cache;
++      else
++#endif
++
+       if (!addr)
+               addr = TASK_UNMAPPED_BASE;
+@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+       info.high_limit = TASK_SIZE;
+       info.align_mask = align_mask;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index dc506b0..39baade 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -171,7 +171,7 @@ SECTIONS {
+       /* Per-cpu data: */
+       . = ALIGN(PERCPU_PAGE_SIZE);
+       PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+-      __phys_per_cpu_start = __per_cpu_load;
++      __phys_per_cpu_start = per_cpu_load;
+       /*
+        * ensure percpu data fits
+        * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index fa6ad95..b46bd89 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+       return pte_present(pte);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 8; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ #     define VM_READ_BIT      0
+ #     define VM_WRITE_BIT     1
+ #     define VM_EXEC_BIT      2
+@@ -151,8 +168,21 @@ retry:
+       if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+               goto bad_area;
+-      if ((vma->vm_flags & mask) != mask)
++      if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++                      if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++                              goto bad_area;
++
++                      up_read(&mm->mmap_sem);
++                      pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               goto bad_area;
++      }
+       /*
+        * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 85de86d..db7f6b8 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+               unsigned long pgoff, unsigned long flags)
+ {
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+       if (len > RGN_MAP_LIMIT)
+               return -ENOMEM;
+@@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+       info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+       info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 1841ef6..74d8330 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
+               vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+               vma->vm_end = vma->vm_start + PAGE_SIZE;
+               vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++                      vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++                      if (current->mm->pax_flags & MF_PAX_MPROTECT)
++                              vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++              }
++#endif
++
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+               down_write(&current->mm->mmap_sem);
+               if (insert_vm_struct(current->mm, vma)) {
+@@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
+       gate_vma.vm_start = FIXADDR_USER_START;
+       gate_vma.vm_end = FIXADDR_USER_END;
+       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+-      gate_vma.vm_page_prot = __P101;
++      gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+       return 0;
+ }
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee98..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT                4
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif  /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       prefetch(from);
+       if (access_ok(VERIFY_WRITE, to, n))
+               __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       prefetchw(to);
+       if (access_ok(VERIFY_READ, from, n))
+               __copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define        L1_CACHE_SHIFT  4
+-#define        L1_CACHE_BYTES  (1<< L1_CACHE_SHIFT)
++#define        L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define ARCH_DMA_MINALIGN     L1_CACHE_BYTES
+diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
+index 4e5aa2f..172c469 100644
+--- a/arch/m68k/kernel/time.c
++++ b/arch/m68k/kernel/time.c
+@@ -107,6 +107,7 @@ static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+       switch (cmd) {
+       case RTC_PLL_GET:
++              memset(&pll, 0, sizeof(pll));
+               if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
+                       return -EINVAL;
+               return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index db1b7da..8e13684 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & HUGEPT_MASK;
+       info.align_offset = 0;
++      info.threadstack_offset = 0;
+       return vm_unmapped_area(&info);
+ }
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
++#include <linux/const.h>
+ #include <asm/registers.h>
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES        (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES       L1_CACHE_BYTES
+diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
+index 5c3f688..f8cc1b3 100644
+--- a/arch/mips/Kbuild
++++ b/arch/mips/Kbuild
+@@ -1,7 +1,7 @@
+ # Fail on warnings - also for files referenced in subdirs
+ # -Werror can be disabled for specific files using:
+ # CFLAGS_<file.o> := -Wno-error
+-subdir-ccflags-y := -Werror
++# subdir-ccflags-y := -Werror
+ # platform specific definitions
+ include arch/mips/Kbuild.platforms
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 212ff92..36b3437 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -50,6 +50,7 @@ config MIPS
+       select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_NMI
+       select VIRT_TO_BUS
++      select HAVE_GCC_PLUGINS
+       select MODULES_USE_ELF_REL if MODULES
+       select MODULES_USE_ELF_RELA if MODULES && 64BIT
+       select CLONE_BACKWARDS
+@@ -2561,7 +2562,7 @@ config RELOCATION_TABLE_SIZE
+ config RANDOMIZE_BASE
+       bool "Randomize the address of the kernel image"
+-      depends on RELOCATABLE
++      depends on RELOCATABLE && BROKEN_SECURITY
+       ---help---
+          Randomizes the physical and virtual address at which the
+          kernel image is loaded, as a security feature that
+@@ -2777,6 +2778,7 @@ source "kernel/Kconfig.preempt"
+ config KEXEC
+       bool "Kexec system call"
+       select KEXEC_CORE
++      depends on !GRKERNSEC_KMEM
+       help
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 0ab176b..c4469a4 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -22,15 +22,39 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/war.h>
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i)          { (i) }
++#ifdef CONFIG_64BIT
++#define _ASM_EXTABLE(from, to)                \
++"     .section __ex_table,\"a\"\n"    \
++"     .dword  " #from ", " #to"\n"    \
++"     .previous\n"
++#else
++#define _ASM_EXTABLE(from, to)                \
++"     .section __ex_table,\"a\"\n"    \
++"     .word   " #from ", " #to"\n"    \
++"     .previous\n"
++#endif
++
+ /*
+  * atomic_read - read atomic variable
+  * @v: pointer of type atomic_t
+  *
+  * Atomically reads the value of @v.
+  */
+-#define atomic_read(v)                READ_ONCE((v)->counter)
++static inline int atomic_read(const atomic_t *v)
++{
++      return READ_ONCE(v->counter);
++}
++
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return READ_ONCE(v->counter);
++}
+ /*
+  * atomic_set - set atomic variable
+@@ -39,47 +63,77 @@
+  *
+  * Atomically sets the value of @v to @i.
+  */
+-#define atomic_set(v, i)      WRITE_ONCE((v)->counter, (i))
++static inline void atomic_set(atomic_t *v, int i)
++{
++      WRITE_ONCE(v->counter, i);
++}
+-#define ATOMIC_OP(op, c_op, asm_op)                                         \
+-static __inline__ void atomic_##op(int i, atomic_t * v)                             \
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      WRITE_ONCE(v->counter, i);
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST                               \
++      "       b       4f              \n"     \
++      "       .set    noreorder       \n"     \
++      "3:     b       5f              \n"     \
++      "       move    %0, %1          \n"     \
++      "       .set    reorder         \n"
++#define __OVERFLOW_EXTABLE    \
++      "3:\n"                  \
++      _ASM_EXTABLE(2b, 3b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, extable)                            \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v)       \
+ {                                                                           \
+       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+               int temp;                                                     \
+                                                                             \
+               __asm__ __volatile__(                                         \
+-              "       .set    arch=r4000                              \n"   \
+-              "1:     ll      %0, %1          # atomic_" #op "        \n"   \
+-              "       " #asm_op " %0, %2                              \n"   \
++              "       .set    mips3                                   \n"   \
++              "1:     ll      %0, %1          # atomic_" #op #suffix "\n"   \
++              "2:     " #asm_op " %0, %2                              \n"   \
+               "       sc      %0, %1                                  \n"   \
+               "       beqzl   %0, 1b                                  \n"   \
++              extable                                                       \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
+               : "Ir" (i));                                                  \
+       } else if (kernel_uses_llsc) {                                        \
+               int temp;                                                     \
+                                                                             \
+-              do {                                                          \
+-                      __asm__ __volatile__(                                 \
+-                      "       .set    "MIPS_ISA_LEVEL"                \n"   \
+-                      "       ll      %0, %1          # atomic_" #op "\n"   \
+-                      "       " #asm_op " %0, %2                      \n"   \
+-                      "       sc      %0, %1                          \n"   \
+-                      "       .set    mips0                           \n"   \
+-                      : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
+-                      : "Ir" (i));                                          \
+-              } while (unlikely(!temp));                                    \
++              __asm__ __volatile__(                                         \
++              "       .set    "MIPS_ISA_LEVEL"                        \n"   \
++              "1:     ll      %0, %1          # atomic_" #op #suffix "\n"   \
++              "2:     " #asm_op " %0, %2                              \n"   \
++              "       sc      %0, %1                                  \n"   \
++              "       beqz    %0, 1b                                  \n"   \
++                      extable                                               \
++              "       .set    mips0                                   \n"   \
++              : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
++              : "Ir" (i));                                                  \
+       } else {                                                              \
+               unsigned long flags;                                          \
+                                                                             \
+               raw_local_irq_save(flags);                                    \
+-              v->counter c_op i;                                            \
++              __asm__ __volatile__(                                         \
++              "2:     " #asm_op " %0, %1                              \n"   \
++              extable                                                       \
++              : "+r" (v->counter) : "Ir" (i));                              \
+               raw_local_irq_restore(flags);                                 \
+       }                                                                     \
+ }
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                                  \
+-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)             \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, )              \
++                            __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable)            \
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t * v) \
+ {                                                                           \
+       int result;                                                           \
+                                                                             \
+@@ -87,12 +141,15 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)         \
+               int temp;                                                     \
+                                                                             \
+               __asm__ __volatile__(                                         \
+-              "       .set    arch=r4000                              \n"   \
+-              "1:     ll      %1, %2          # atomic_" #op "_return \n"   \
+-              "       " #asm_op " %0, %1, %3                          \n"   \
++              "       .set    mips3                                   \n"   \
++              "1:     ll      %1, %2  # atomic_" #op "_return" #suffix"\n"  \
++              "2:     " #asm_op " %0, %1, %3                          \n"   \
+               "       sc      %0, %2                                  \n"   \
+               "       beqzl   %0, 1b                                  \n"   \
+-              "       " #asm_op " %0, %1, %3                          \n"   \
++              post_op                                                       \
++              extable                                                       \
++              "4:     " #asm_op " %0, %1, %3                          \n"   \
++              "5:                                                     \n"   \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (result), "=&r" (temp),                               \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
+@@ -100,32 +157,40 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)        \
+       } else if (kernel_uses_llsc) {                                        \
+               int temp;                                                     \
+                                                                             \
+-              do {                                                          \
+-                      __asm__ __volatile__(                                 \
+-                      "       .set    "MIPS_ISA_LEVEL"                \n"   \
+-                      "       ll      %1, %2  # atomic_" #op "_return \n"   \
+-                      "       " #asm_op " %0, %1, %3                  \n"   \
+-                      "       sc      %0, %2                          \n"   \
+-                      "       .set    mips0                           \n"   \
+-                      : "=&r" (result), "=&r" (temp),                       \
+-                        "+" GCC_OFF_SMALL_ASM() (v->counter)                \
+-                      : "Ir" (i));                                          \
+-              } while (unlikely(!result));                                  \
+-                                                                            \
+-              result = temp; result c_op i;                                 \
++              __asm__ __volatile__(                                         \
++              "       .set    "MIPS_ISA_LEVEL"                        \n"   \
++              "1:     ll      %1, %2  # atomic_" #op "_return" #suffix "\n" \
++              "2:     " #asm_op " %0, %1, %3                          \n"   \
++              "       sc      %0, %2                                  \n"   \
++              post_op                                                       \
++              extable                                                       \
++              "4:     " #asm_op " %0, %1, %3                          \n"   \
++              "5:                                                     \n"   \
++              "       .set    mips0                                   \n"   \
++              : "=&r" (result), "=&r" (temp),                               \
++                "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
++              : "Ir" (i));                                                  \
+       } else {                                                              \
+               unsigned long flags;                                          \
+                                                                             \
+               raw_local_irq_save(flags);                                    \
+-              result = v->counter;                                          \
+-              result c_op i;                                                \
+-              v->counter = result;                                          \
++              __asm__ __volatile__(                                         \
++              "       lw      %0, %1                                  \n"   \
++              "2:     " #asm_op " %0, %1, %2                          \n"   \
++              "       sw      %0, %1                                  \n"   \
++              "3:                                                     \n"   \
++              extable                                                       \
++              : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter)        \
++              : "Ir" (i));                                                  \
+               raw_local_irq_restore(flags);                                 \
+       }                                                                     \
+                                                                             \
+       return result;                                                        \
+ }
++#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , )        \
++                                   __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_FETCH_OP(op, c_op, asm_op)                                   \
+ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)              \
+ {                                                                           \
+@@ -173,13 +238,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)         \
+       return result;                                                        \
+ }
+-#define ATOMIC_OPS(op, c_op, asm_op)                                        \
+-      ATOMIC_OP(op, c_op, asm_op)                                           \
+-      ATOMIC_OP_RETURN(op, c_op, asm_op)                                    \
+-      ATOMIC_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC_OPS(op, asm_op)                                              \
++      ATOMIC_OP(op, asm_op)                                         \
++      ATOMIC_OP_RETURN(op, asm_op)                                  \
++      ATOMIC_FETCH_OP(op, asm_op)
+-ATOMIC_OPS(add, +=, addu)
+-ATOMIC_OPS(sub, -=, subu)
++ATOMIC_OPS(add, addu)
++ATOMIC_OPS(sub, subu)
+ #define atomic_add_return_relaxed     atomic_add_return_relaxed
+ #define atomic_sub_return_relaxed     atomic_sub_return_relaxed
+@@ -187,13 +252,13 @@ ATOMIC_OPS(sub, -=, subu)
+ #define atomic_fetch_sub_relaxed      atomic_fetch_sub_relaxed
+ #undef ATOMIC_OPS
+-#define ATOMIC_OPS(op, c_op, asm_op)                                        \
+-      ATOMIC_OP(op, c_op, asm_op)                                           \
+-      ATOMIC_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC_OPS(op, asm_op)                                              \
++      ATOMIC_OP(op, asm_op)                                         \
++      ATOMIC_FETCH_OP(op, asm_op)
+-ATOMIC_OPS(and, &=, and)
+-ATOMIC_OPS(or, |=, or)
+-ATOMIC_OPS(xor, ^=, xor)
++ATOMIC_OPS(and, and)
++ATOMIC_OPS(or, or)
++ATOMIC_OPS(xor, xor)
+ #define atomic_fetch_and_relaxed      atomic_fetch_and_relaxed
+ #define atomic_fetch_or_relaxed               atomic_fetch_or_relaxed
+@@ -202,7 +267,9 @@ ATOMIC_OPS(xor, ^=, xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+ /*
+  * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+@@ -212,7 +279,7 @@ ATOMIC_OPS(xor, ^=, xor)
+  * Atomically test @v and subtract @i if @v is greater or equal than @i.
+  * The function returns the old value of @v minus @i.
+  */
+-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
+ {
+       int result;
+@@ -222,7 +289,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+               int temp;
+               __asm__ __volatile__(
+-              "       .set    arch=r4000                              \n"
++              "       .set    "MIPS_ISA_LEVEL"                        \n"
+               "1:     ll      %1, %2          # atomic_sub_if_positive\n"
+               "       subu    %0, %1, %3                              \n"
+               "       bltz    %0, 1f                                  \n"
+@@ -271,8 +338,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+       return result;
+ }
+-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
++                                         int new)
++{
++      return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++      return xchg(&v->counter, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++      return xchg(&(v->counter), new);
++}
+ /**
+  * __atomic_add_unless - add unless the number is a given value
+@@ -300,6 +385,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v);
++}
+ /*
+  * atomic_sub_and_test - subtract value from variable and test result
+@@ -321,6 +410,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+  * other cases.
+  */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v) == 0;
++}
+ /*
+  * atomic_dec_and_test - decrement by 1 and test
+@@ -345,6 +438,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+  * Atomically increments @v by 1.
+  */
+ #define atomic_inc(v) atomic_add(1, (v))
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      atomic_add_unchecked(1, v);
++}
+ /*
+  * atomic_dec - decrement and test
+@@ -353,6 +450,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+  * Atomically decrements @v by 1.
+  */
+ #define atomic_dec(v) atomic_sub(1, (v))
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      atomic_sub_unchecked(1, v);
++}
+ /*
+  * atomic_add_negative - add and test if negative
+@@ -374,54 +475,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+  * @v: pointer of type atomic64_t
+  *
+  */
+-#define atomic64_read(v)      READ_ONCE((v)->counter)
++static inline long atomic64_read(const atomic64_t *v)
++{
++      return READ_ONCE(v->counter);
++}
++
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      return READ_ONCE(v->counter);
++}
+ /*
+  * atomic64_set - set atomic variable
+  * @v: pointer of type atomic64_t
+  * @i: required value
+  */
+-#define atomic64_set(v, i)    WRITE_ONCE((v)->counter, (i))
++static inline void atomic64_set(atomic64_t *v, long i)
++{
++      WRITE_ONCE(v->counter, i);
++}
+-#define ATOMIC64_OP(op, c_op, asm_op)                                       \
+-static __inline__ void atomic64_##op(long i, atomic64_t * v)                \
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++      WRITE_ONCE(v->counter, i);
++}
++
++#define __ATOMIC64_OP(op, suffix, asm_op, extable)                          \
++static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v)    \
+ {                                                                           \
+       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+               long temp;                                                    \
+                                                                             \
+               __asm__ __volatile__(                                         \
+-              "       .set    arch=r4000                              \n"   \
+-              "1:     lld     %0, %1          # atomic64_" #op "      \n"   \
+-              "       " #asm_op " %0, %2                              \n"   \
++              "       .set    "MIPS_ISA_LEVEL"                        \n"   \
++              "1:     lld     %0, %1          # atomic64_" #op #suffix "\n" \
++              "2:     " #asm_op " %0, %2                              \n"   \
+               "       scd     %0, %1                                  \n"   \
+               "       beqzl   %0, 1b                                  \n"   \
++              extable                                                       \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
+               : "Ir" (i));                                                  \
+       } else if (kernel_uses_llsc) {                                        \
+               long temp;                                                    \
+                                                                             \
+-              do {                                                          \
+-                      __asm__ __volatile__(                                 \
+-                      "       .set    "MIPS_ISA_LEVEL"                \n"   \
+-                      "       lld     %0, %1          # atomic64_" #op "\n" \
+-                      "       " #asm_op " %0, %2                      \n"   \
+-                      "       scd     %0, %1                          \n"   \
+-                      "       .set    mips0                           \n"   \
+-                      : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
+-                      : "Ir" (i));                                          \
+-              } while (unlikely(!temp));                                    \
++              __asm__ __volatile__(                                         \
++              "       .set    "MIPS_ISA_LEVEL"                        \n"   \
++              "1:     lld     %0, %1          # atomic64_" #op #suffix "\n" \
++              "2:     " #asm_op " %0, %2                              \n"   \
++              "       scd     %0, %1                                  \n"   \
++              "       beqz    %0, 1b                                  \n"   \
++                      extable                                               \
++              "       .set    mips0                                   \n"   \
++              : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
++              : "Ir" (i));                                                  \
+       } else {                                                              \
+               unsigned long flags;                                          \
+                                                                             \
+               raw_local_irq_save(flags);                                    \
+-              v->counter c_op i;                                            \
++              __asm__ __volatile__(                                         \
++              "2:     " #asm_op " %0, %1                              \n"   \
++              extable                                                       \
++              : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i));           \
+               raw_local_irq_restore(flags);                                 \
+       }                                                                     \
+ }
+-#define ATOMIC64_OP_RETURN(op, c_op, asm_op)                                \
+-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, )    \
++                              __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable)          \
++static inline long atomic64_##op##_return##suffix##_relaxed(long i, atomic64##suffix##_t * v)\
+ {                                                                           \
+       long result;                                                          \
+                                                                             \
+@@ -429,12 +553,15 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+               long temp;                                                    \
+                                                                             \
+               __asm__ __volatile__(                                         \
+-              "       .set    arch=r4000                              \n"   \
++              "       .set    mips3                                   \n"   \
+               "1:     lld     %1, %2          # atomic64_" #op "_return\n"  \
+-              "       " #asm_op " %0, %1, %3                          \n"   \
++              "2:     " #asm_op " %0, %1, %3                          \n"   \
+               "       scd     %0, %2                                  \n"   \
+               "       beqzl   %0, 1b                                  \n"   \
+-              "       " #asm_op " %0, %1, %3                          \n"   \
++              post_op                                                       \
++              extable                                                       \
++              "4:     " #asm_op " %0, %1, %3                          \n"   \
++              "5:                                                     \n"   \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (result), "=&r" (temp),                               \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
+@@ -442,33 +569,42 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+       } else if (kernel_uses_llsc) {                                        \
+               long temp;                                                    \
+                                                                             \
+-              do {                                                          \
+-                      __asm__ __volatile__(                                 \
+-                      "       .set    "MIPS_ISA_LEVEL"                \n"   \
+-                      "       lld     %1, %2  # atomic64_" #op "_return\n"  \
+-                      "       " #asm_op " %0, %1, %3                  \n"   \
+-                      "       scd     %0, %2                          \n"   \
+-                      "       .set    mips0                           \n"   \
+-                      : "=&r" (result), "=&r" (temp),                       \
+-                        "=" GCC_OFF_SMALL_ASM() (v->counter)                \
+-                      : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)          \
+-                      : "memory");                                          \
+-              } while (unlikely(!result));                                  \
+-                                                                            \
+-              result = temp; result c_op i;                                 \
++              __asm__ __volatile__(                                         \
++              "       .set    "MIPS_ISA_LEVEL"                        \n"   \
++              "1:     lld     %1, %2  # atomic64_" #op "_return" #suffix "\n"\
++              "2:     " #asm_op " %0, %1, %3                          \n"   \
++              "       scd     %0, %2                                  \n"   \
++              "       beqz    %0, 1b                                  \n"   \
++              post_op                                                       \
++              extable                                                       \
++              "4:     " #asm_op " %0, %1, %3                          \n"   \
++              "5:                                                     \n"   \
++              "       .set    mips0                                   \n"   \
++              : "=&r" (result), "=&r" (temp),                               \
++                "=" GCC_OFF_SMALL_ASM() (v->counter)                        \
++              : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)                  \
++              : "memory");                                                  \
+       } else {                                                              \
+               unsigned long flags;                                          \
+                                                                             \
+               raw_local_irq_save(flags);                                    \
+-              result = v->counter;                                          \
+-              result c_op i;                                                \
+-              v->counter = result;                                          \
++              __asm__ __volatile__(                                         \
++              "       ld      %0, %1                                  \n"   \
++              "2:     " #asm_op " %0, %1, %2                          \n"   \
++              "       sd      %0, %1                                  \n"   \
++              "3:                                                     \n"   \
++              extable                                                       \
++              : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter)        \
++              : "Ir" (i));                                                  \
+               raw_local_irq_restore(flags);                                 \
+       }                                                                     \
+                                                                             \
+       return result;                                                        \
+ }
++#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , )    \
++                                     __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_FETCH_OP(op, c_op, asm_op)                                 \
+ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
+ {                                                                           \
+@@ -517,13 +653,13 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
+       return result;                                                        \
+ }
+-#define ATOMIC64_OPS(op, c_op, asm_op)                                              \
+-      ATOMIC64_OP(op, c_op, asm_op)                                         \
+-      ATOMIC64_OP_RETURN(op, c_op, asm_op)                                  \
+-      ATOMIC64_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC64_OPS(op, asm_op)                                            \
++      ATOMIC64_OP(op, asm_op)                                       \
++      ATOMIC64_OP_RETURN(op, asm_op)                                \
++      ATOMIC64_FETCH_OP(op, asm_op)
+-ATOMIC64_OPS(add, +=, daddu)
+-ATOMIC64_OPS(sub, -=, dsubu)
++ATOMIC64_OPS(add, daddu)
++ATOMIC64_OPS(sub, dsubu)
+ #define atomic64_add_return_relaxed   atomic64_add_return_relaxed
+ #define atomic64_sub_return_relaxed   atomic64_sub_return_relaxed
+@@ -531,13 +667,13 @@ ATOMIC64_OPS(sub, -=, dsubu)
+ #define atomic64_fetch_sub_relaxed    atomic64_fetch_sub_relaxed
+ #undef ATOMIC64_OPS
+-#define ATOMIC64_OPS(op, c_op, asm_op)                                              \
+-      ATOMIC64_OP(op, c_op, asm_op)                                         \
+-      ATOMIC64_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC64_OPS(op, asm_op)                                            \
++      ATOMIC64_OP(op, asm_op)                                       \
++      ATOMIC64_FETCH_OP(op, asm_op)
+-ATOMIC64_OPS(and, &=, and)
+-ATOMIC64_OPS(or, |=, or)
+-ATOMIC64_OPS(xor, ^=, xor)
++ATOMIC64_OPS(and, and)
++ATOMIC64_OPS(or, or)
++ATOMIC64_OPS(xor, xor)
+ #define atomic64_fetch_and_relaxed    atomic64_fetch_and_relaxed
+ #define atomic64_fetch_or_relaxed     atomic64_fetch_or_relaxed
+@@ -546,7 +682,11 @@ ATOMIC64_OPS(xor, ^=, xor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
+ /*
+  * atomic64_sub_if_positive - conditionally subtract integer from atomic
+@@ -557,7 +697,7 @@ ATOMIC64_OPS(xor, ^=, xor)
+  * Atomically test @v and subtract @i if @v is greater or equal than @i.
+  * The function returns the old value of @v minus @i.
+  */
+-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
+ {
+       long result;
+@@ -567,7 +707,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+               long temp;
+               __asm__ __volatile__(
+-              "       .set    arch=r4000                              \n"
++              "       .set    "MIPS_ISA_LEVEL"                        \n"
+               "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
+               "       dsubu   %0, %1, %3                              \n"
+               "       bltz    %0, 1f                                  \n"
+@@ -616,9 +756,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+       return result;
+ }
+-#define atomic64_cmpxchg(v, o, n) \
+-      ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++                                            long new)
++{
++      return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg(atomic64_t *v, long new)
++{
++      return xchg(&v->counter, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++      return xchg(&(v->counter), new);
++}
+ /**
+  * atomic64_add_unless - add unless the number is a given value
+@@ -648,6 +805,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
+ /*
+  * atomic64_sub_and_test - subtract value from variable and test result
+@@ -669,6 +827,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+  * other cases.
+  */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
++#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
+ /*
+  * atomic64_dec_and_test - decrement by 1 and test
+@@ -693,6 +852,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+  * Atomically increments @v by 1.
+  */
+ #define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
+ /*
+  * atomic64_dec - decrement and test
+@@ -701,6 +861,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+  * Atomically decrements @v by 1.
+  */
+ #define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
+ /*
+  * atomic64_add_negative - add and test if negative
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
++#include <linux/const.h>
+ #include <kmalloc.h>
+ #define L1_CACHE_SHIFT                CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_SHIFT               L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES               L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 2b3dc29..1f7bdc4 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -458,6 +458,13 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
+ #endif
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN   (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+ #define ARCH_DLINFO                                                   \
+ do {                                                                  \
+diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
+index c1f6afa..38cc6e9 100644
+--- a/arch/mips/include/asm/exec.h
++++ b/arch/mips/include/asm/exec.h
+@@ -12,6 +12,6 @@
+ #ifndef _ASM_EXEC_H
+ #define _ASM_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ #endif /* _ASM_EXEC_H */
+diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
+index 9e8ef59..1139d6b 100644
+--- a/arch/mips/include/asm/hw_irq.h
++++ b/arch/mips/include/asm/hw_irq.h
+@@ -10,7 +10,7 @@
+ #include <linux/atomic.h>
+-extern atomic_t irq_err_count;
++extern atomic_unchecked_t irq_err_count;
+ /*
+  * interrupt-retrigger: NOP for now. This may not be appropriate for all
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 15e0fec..3ee3eec 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -11,7 +11,6 @@
+ #include <linux/linkage.h>
+ #include <linux/smp.h>
+-#include <linux/irqdomain.h>
+ #include <asm/mipsmtregs.h>
+diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
+index 8feaed6..1bd8a64 100644
+--- a/arch/mips/include/asm/local.h
++++ b/arch/mips/include/asm/local.h
+@@ -13,15 +13,25 @@ typedef struct
+       atomic_long_t a;
+ } local_t;
++typedef struct {
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i)     atomic_long_set_unchecked(&(l)->a, (i))
+ #define local_add(i, l) atomic_long_add((i), (&(l)->a))
++#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
+ #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
++#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
+ #define local_inc(l)  atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l)        atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l)  atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l)        atomic_long_dec_unchecked(&(l)->a)
+ /*
+  * Same as above, but return the result value
+@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
+       return result;
+ }
++static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
++{
++      unsigned long result;
++
++      if (kernel_uses_llsc && R10000_LLSC_WAR) {
++              unsigned long temp;
++
++              __asm__ __volatile__(
++              "       .set    mips3                                   \n"
++              "1:"    __LL    "%1, %2         # local_add_return      \n"
++              "       addu    %0, %1, %3                              \n"
++                      __SC    "%0, %2                                 \n"
++              "       beqzl   %0, 1b                                  \n"
++              "       addu    %0, %1, %3                              \n"
++              "       .set    mips0                                   \n"
++              : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++              : "Ir" (i), "m" (l->a.counter)
++              : "memory");
++      } else if (kernel_uses_llsc) {
++              unsigned long temp;
++
++              __asm__ __volatile__(
++              "       .set    mips3                                   \n"
++              "1:"    __LL    "%1, %2         # local_add_return      \n"
++              "       addu    %0, %1, %3                              \n"
++                      __SC    "%0, %2                                 \n"
++              "       beqz    %0, 1b                                  \n"
++              "       addu    %0, %1, %3                              \n"
++              "       .set    mips0                                   \n"
++              : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++              : "Ir" (i), "m" (l->a.counter)
++              : "memory");
++      } else {
++              unsigned long flags;
++
++              local_irq_save(flags);
++              result = l->a.counter;
++              result += i;
++              l->a.counter = result;
++              local_irq_restore(flags);
++      }
++
++      return result;
++}
++
+ static __inline__ long local_sub_return(long i, local_t * l)
+ {
+       unsigned long result;
+@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
+ #define local_cmpxchg(l, o, n) \
+       ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++      ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+ /**
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 5f98759..a3a7cb2 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+   #ifdef CONFIG_CPU_MIPS32
+     typedef struct { unsigned long pte_low, pte_high; } pte_t;
+     #define pte_val(x)          ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+-    #define __pte(x)    ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++    #define __pte(x)    ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+   #else
+      typedef struct { unsigned long long pte; } pte_t;
+      #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index 93c079a..1d6bf7c 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+       set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
+ #endif
+ /*
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 70128d3..471bc25 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -20,6 +20,9 @@
+ #include <asm/io.h>
+ #include <asm/pgtable-bits.h>
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ struct mm_struct;
+ struct vm_area_struct;
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index e309d8f..20eefec 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -101,6 +101,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_NOTIFY_RESUME     5       /* callback before returning to user */
+ #define TIF_UPROBE            6       /* breakpointed or singlestepping */
+ #define TIF_RESTORE_SIGMASK   9       /* restore signal mask in do_signal() */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID      10      /* update credentials on syscall entry/exit */
++
+ #define TIF_USEDFPU           16      /* FPU was used by this task this quantum (SMP) */
+ #define TIF_MEMDIE            18      /* is terminating due to OOM killer */
+ #define TIF_NOHZ              19      /* in adaptive nohz mode */
+@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_USEDMSA          (1<<TIF_USEDMSA)
+ #define _TIF_MSA_CTX_LIVE     (1<<TIF_MSA_CTX_LIVE)
+ #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
++#define _TIF_GRSEC_SETXID     (1<<TIF_GRSEC_SETXID)
+ #define _TIF_WORK_SYSCALL_ENTRY       (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
+                                _TIF_SYSCALL_AUDIT | \
+-                               _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++                               _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
++                               _TIF_GRSEC_SETXID)
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT        (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
+-                               _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
++                               _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK                \
+@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
+ /* work to do on any return to u-space */
+ #define _TIF_ALLWORK_MASK     (_TIF_NOHZ | _TIF_WORK_MASK |           \
+                                _TIF_WORK_SYSCALL_EXIT |               \
+-                               _TIF_SYSCALL_TRACEPOINT)
++                               _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+ /*
+  * We stash processor id into a COP0 register to retrieve it fast
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index 21a2aab..c00b80d 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -147,6 +147,7 @@ static inline bool eva_kernel_access(void)
+       __ok == 0;                                                      \
+ })
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size)                                   \
+       likely(__access_ok((addr), (size), __access_mask))
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 58ad63d..051b4b7 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -36,6 +36,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE               (TASK32_SIZE / 3 * 2)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN   (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 49fb881..b9ab7c2 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -40,6 +40,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE               (TASK32_SIZE / 3 * 2)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN   (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
+index 44a1f79..2bd6aa3 100644
+--- a/arch/mips/kernel/irq-gt641xx.c
++++ b/arch/mips/kernel/irq-gt641xx.c
+@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
+               }
+       }
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+ }
+ void __init gt641xx_irq_init(void)
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index f25f7ea..19e1c62 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq)
+       printk("unexpected IRQ # %d\n", irq);
+ }
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+ int arch_show_interrupts(struct seq_file *p, int prec)
+ {
+-      seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++      seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+       return 0;
+ }
+ asmlinkage void spurious_interrupt(void)
+ {
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+ }
+ void __init init_IRQ(void)
+@@ -61,6 +61,8 @@ void __init init_IRQ(void)
+ }
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
++
++extern void gr_handle_kernel_exploit(void);
+ static inline void check_stack_overflow(void)
+ {
+       unsigned long sp;
+@@ -76,6 +78,7 @@ static inline void check_stack_overflow(void)
+               printk("do_IRQ: stack overflow: %ld\n",
+                      sp - sizeof(struct thread_info));
+               dump_stack();
++              gr_handle_kernel_exploit();
+       }
+ }
+ #else
+diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
+index 5b31a94..15ac4a1 100644
+--- a/arch/mips/kernel/pm-cps.c
++++ b/arch/mips/kernel/pm-cps.c
+@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
+       nc_core_ready_count = nc_addr;
+       /* Ensure ready_count is zero-initialised before the assembly runs */
+-      ACCESS_ONCE(*nc_core_ready_count) = 0;
++      ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
+       coupled_barrier(&per_cpu(pm_barrier, core), online);
+       /* Run the generated entry code */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index d2d0615..46c1803 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -545,18 +545,6 @@ out:
+       return pc;
+ }
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() & ~PAGE_MASK;
+-
+-      return sp & ALMASK;
+-}
+-
+ static void arch_dump_stack(void *info)
+ {
+       struct pt_regs *regs;
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 6103b24..8253315 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -882,6 +882,10 @@ long arch_ptrace(struct task_struct *child, long request,
+       return ret;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+  * Notification of system call entry/exit
+  * - triggered by current->work.syscall_trace
+@@ -899,6 +903,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+       if (secure_computing(NULL) == -1)
+               return -1;
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_enter(regs, regs->regs[2]);
+diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
+index 4472a7f..c5905e6 100644
+--- a/arch/mips/kernel/sync-r4k.c
++++ b/arch/mips/kernel/sync-r4k.c
+@@ -18,8 +18,8 @@
+ #include <asm/mipsregs.h>
+ static unsigned int initcount = 0;
+-static atomic_t count_count_start = ATOMIC_INIT(0);
+-static atomic_t count_count_stop = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
+ #define COUNTON 100
+ #define NR_LOOPS 3
+@@ -46,13 +46,13 @@ void synchronise_count_master(int cpu)
+       for (i = 0; i < NR_LOOPS; i++) {
+               /* slaves loop on '!= 2' */
+-              while (atomic_read(&count_count_start) != 1)
++              while (atomic_read_unchecked(&count_count_start) != 1)
+                       mb();
+-              atomic_set(&count_count_stop, 0);
++              atomic_set_unchecked(&count_count_stop, 0);
+               smp_wmb();
+               /* Let the slave writes its count register */
+-              atomic_inc(&count_count_start);
++              atomic_inc_unchecked(&count_count_start);
+               /* Count will be initialised to current timer */
+               if (i == 1)
+@@ -67,11 +67,11 @@ void synchronise_count_master(int cpu)
+               /*
+                * Wait for slave to leave the synchronization point:
+                */
+-              while (atomic_read(&count_count_stop) != 1)
++              while (atomic_read_unchecked(&count_count_stop) != 1)
+                       mb();
+-              atomic_set(&count_count_start, 0);
++              atomic_set_unchecked(&count_count_start, 0);
+               smp_wmb();
+-              atomic_inc(&count_count_stop);
++              atomic_inc_unchecked(&count_count_stop);
+       }
+       /* Arrange for an interrupt in a short while */
+       write_c0_compare(read_c0_count() + COUNTON);
+@@ -96,8 +96,8 @@ void synchronise_count_slave(int cpu)
+        */
+       for (i = 0; i < NR_LOOPS; i++) {
+-              atomic_inc(&count_count_start);
+-              while (atomic_read(&count_count_start) != 2)
++              atomic_inc_unchecked(&count_count_start);
++              while (atomic_read_unchecked(&count_count_start) != 2)
+                       mb();
+               /*
+@@ -106,8 +106,8 @@ void synchronise_count_slave(int cpu)
+               if (i == NR_LOOPS-1)
+                       write_c0_count(initcount);
+-              atomic_inc(&count_count_stop);
+-              while (atomic_read(&count_count_stop) != 2)
++              atomic_inc_unchecked(&count_count_stop);
++              while (atomic_read_unchecked(&count_count_stop) != 2)
+                       mb();
+       }
+       /* Arrange for an interrupt in a short while */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 3de85be..73560ec 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -695,7 +695,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+       };
+       prev_state = exception_enter();
+-      die_if_kernel("Integer overflow", regs);
++      if (unlikely(!user_mode(regs))) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++              if (fixup_exception(regs)) {
++                      pax_report_refcount_error(regs, NULL);
++                      exception_exit(prev_state);
++                      return;
++              }
++#endif
++
++              die("Integer overflow", regs);
++      }
+       force_sig_info(SIGFPE, &info, current);
+       exception_exit(prev_state);
+diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
+index 927dc94..27269ee 100644
+--- a/arch/mips/lib/ashldi3.c
++++ b/arch/mips/lib/ashldi3.c
+@@ -2,7 +2,11 @@
+ #include "libgcc.h"
+-long long notrace __ashldi3(long long u, word_type b)
++#ifdef CONFIG_64BIT
++DWtype notrace __ashlti3(DWtype u, word_type b)
++#else
++DWtype notrace __ashldi3(DWtype u, word_type b)
++#endif
+ {
+       DWunion uu, w;
+       word_type bm;
+@@ -11,19 +15,22 @@ long long notrace __ashldi3(long long u, word_type b)
+               return u;
+       uu.ll = u;
+-      bm = 32 - b;
++      bm = BITS_PER_LONG - b;
+       if (bm <= 0) {
+               w.s.low = 0;
+-              w.s.high = (unsigned int) uu.s.low << -bm;
++              w.s.high = (unsigned long) uu.s.low << -bm;
+       } else {
+-              const unsigned int carries = (unsigned int) uu.s.low >> bm;
++              const unsigned long carries = (unsigned long) uu.s.low >> bm;
+-              w.s.low = (unsigned int) uu.s.low << b;
+-              w.s.high = ((unsigned int) uu.s.high << b) | carries;
++              w.s.low = (unsigned long) uu.s.low << b;
++              w.s.high = ((unsigned long) uu.s.high << b) | carries;
+       }
+       return w.ll;
+ }
+-
++#ifdef CONFIG_64BIT
++EXPORT_SYMBOL(__ashlti3);
++#else
+ EXPORT_SYMBOL(__ashldi3);
++#endif
+diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
+index 9fdf1a5..6741f0e 100644
+--- a/arch/mips/lib/ashrdi3.c
++++ b/arch/mips/lib/ashrdi3.c
+@@ -2,7 +2,11 @@
+ #include "libgcc.h"
+-long long notrace __ashrdi3(long long u, word_type b)
++#ifdef CONFIG_64BIT
++DWtype notrace __ashrti3(DWtype u, word_type b)
++#else
++DWtype notrace __ashrdi3(DWtype u, word_type b)
++#endif
+ {
+       DWunion uu, w;
+       word_type bm;
+@@ -11,21 +15,24 @@ long long notrace __ashrdi3(long long u, word_type b)
+               return u;
+       uu.ll = u;
+-      bm = 32 - b;
++      bm = BITS_PER_LONG - b;
+       if (bm <= 0) {
+               /* w.s.high = 1..1 or 0..0 */
+               w.s.high =
+-                  uu.s.high >> 31;
++                  uu.s.high >> (BITS_PER_LONG - 1);
+               w.s.low = uu.s.high >> -bm;
+       } else {
+-              const unsigned int carries = (unsigned int) uu.s.high << bm;
++              const unsigned long carries = (unsigned long) uu.s.high << bm;
+               w.s.high = uu.s.high >> b;
+-              w.s.low = ((unsigned int) uu.s.low >> b) | carries;
++              w.s.low = ((unsigned long) uu.s.low >> b) | carries;
+       }
+       return w.ll;
+ }
+-
++#ifdef CONFIG_64BIT
++EXPORT_SYMBOL(__ashrti3);
++#else
+ EXPORT_SYMBOL(__ashrdi3);
++#endif
+diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
+index 05909d58..b03284b 100644
+--- a/arch/mips/lib/libgcc.h
++++ b/arch/mips/lib/libgcc.h
+@@ -5,13 +5,19 @@
+ typedef int word_type __attribute__ ((mode (__word__)));
++#ifdef CONFIG_64BIT
++typedef int DWtype __attribute__((mode(TI)));
++#else
++typedef long long DWtype;
++#endif
++
+ #ifdef __BIG_ENDIAN
+ struct DWstruct {
+-      int high, low;
++      long high, low;
+ };
+ #elif defined(__LITTLE_ENDIAN)
+ struct DWstruct {
+-      int low, high;
++      long low, high;
+ };
+ #else
+ #error I feel sick.
+@@ -19,7 +25,7 @@ struct DWstruct {
+ typedef union {
+       struct DWstruct s;
+-      long long ll;
++      DWtype ll;
+ } DWunion;
+ #endif /* __ASM_LIBGCC_H */
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 9560ad7..da27540 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -31,6 +31,23 @@
+ int show_unhandled_signals = 1;
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * This routine handles page faults.  It determines the address,
+  * and the problem, and then passes it off to one of the appropriate
+@@ -205,6 +222,14 @@ bad_area:
+ bad_area_nosemaphore:
+       /* User mode accesses just cause a SIGSEGV */
+       if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
++                      pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               tsk->thread.cp0_badvaddr = address;
+               tsk->thread.error_code = write;
+               if (show_unhandled_signals &&
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index 72f7478..06abd2a 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -474,10 +474,10 @@ void __init mem_init(void)
+ #ifdef CONFIG_64BIT
+       if ((unsigned long) &_text > (unsigned long) CKSEG0)
+-              /* The -4 is a hack so that user tools don't have to handle
++              /* The -0x2000-4 is a hack so that user tools don't have to handle
+                  the overflow.  */
+               kclist_add(&kcore_kseg0, (void *) CKSEG0,
+-                              0x80000000 - 4, KCORE_TEXT);
++                              0x80000000 - 0x2000 - 4, KCORE_TEXT);
+ #endif
+ }
+ #endif /* !CONFIG_NEED_MULTIPLE_NODES */
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index 3530376..754dde3 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+       struct vm_area_struct *vma;
+       unsigned long addr = addr0;
+       int do_color_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (unlikely(len > TASK_SIZE))
+@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+               do_color_align = 1;
+       /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_color_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.length = len;
+       info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       if (dir == DOWN) {
+               info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+@@ -160,14 +166,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
+ }
+diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
+index cfceaea..65deeb4 100644
+--- a/arch/mips/sgi-ip27/ip27-nmi.c
++++ b/arch/mips/sgi-ip27/ip27-nmi.c
+@@ -187,9 +187,9 @@ void
+ cont_nmi_dump(void)
+ {
+ #ifndef REAL_NMI_SIGNAL
+-      static atomic_t nmied_cpus = ATOMIC_INIT(0);
++      static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
+-      atomic_inc(&nmied_cpus);
++      atomic_inc_unchecked(&nmied_cpus);
+ #endif
+       /*
+        * Only allow 1 cpu to proceed
+@@ -233,7 +233,7 @@ cont_nmi_dump(void)
+               udelay(10000);
+       }
+ #else
+-      while (atomic_read(&nmied_cpus) != num_online_cpus());
++      while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
+ #endif
+       /*
+diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
+index 160b880..3b53fdc 100644
+--- a/arch/mips/sni/rm200.c
++++ b/arch/mips/sni/rm200.c
+@@ -270,7 +270,7 @@ spurious_8259A_irq:
+                              "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
+                       spurious_irq_mask |= irqmask;
+               }
+-              atomic_inc(&irq_err_count);
++              atomic_inc_unchecked(&irq_err_count);
+               /*
+                * Theoretically we do not have to handle this IRQ,
+                * but in Linux this does not cause problems and is
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
+index 41e873b..34d33a7 100644
+--- a/arch/mips/vr41xx/common/icu.c
++++ b/arch/mips/vr41xx/common/icu.c
+@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
+       printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+       return -1;
+ }
+diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
+index ae0e4ee..e8f0692 100644
+--- a/arch/mips/vr41xx/common/irq.c
++++ b/arch/mips/vr41xx/common/irq.c
+@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
+       irq_cascade_t *cascade;
+       if (irq >= NR_IRQS) {
+-              atomic_inc(&irq_err_count);
++              atomic_inc_unchecked(&irq_err_count);
+               return;
+       }
+@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
+               ret = cascade->get_irq(irq);
+               irq = ret;
+               if (ret < 0)
+-                      atomic_inc(&irq_err_count);
++                      atomic_inc_unchecked(&irq_err_count);
+               else
+                       irq_dispatch(irq);
+               if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
++#include <linux/const.h>
++
+ /* L1 cache */
+ #define L1_CACHE_NWAYS                4       /* number of ways in caches */
+ #define L1_CACHE_NENTRIES     256     /* number of entries in each way */
+-#define L1_CACHE_BYTES                16      /* bytes per entry */
+ #define L1_CACHE_SHIFT                4       /* shift for bytes per entry */
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)   /* bytes per entry */
+ #define L1_CACHE_WAYDISP      0x1000  /* displacement of one way from the next */
+ #define L1_CACHE_TAG_VALID    0x00000001      /* cache tag valid bit */
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
++#include <linux/const.h>
++
+ /*
+  * L1 cache
+  */
+ #define L1_CACHE_NWAYS                4               /* number of ways in caches */
+ #define L1_CACHE_NENTRIES     128             /* number of entries in each way */
+-#define L1_CACHE_BYTES                32              /* bytes per entry */
+ #define L1_CACHE_SHIFT                5               /* shift for bytes per entry */
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)   /* bytes per entry */
+ #define L1_CACHE_WAYDISP      0x1000          /* distance from one way to the next */
+ #define L1_CACHE_TAG_VALID    0x00000001      /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 4ce7a01..449202a 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,11 +19,13 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+  * they shouldn't be hard-coded!
+  */
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif /* __ASM_OPENRISC_CACHE_H */
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 5394b9c..e77a306 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -327,6 +327,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+       return dec;
+ }
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index df0f52b..810699b 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
++#include <linux/const.h>
+ /*
+  * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
+@@ -14,6 +15,8 @@
+ #define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 78c9fd3..42fa66a 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs;   /* forward declaration... */
+ #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x10000UL
++
++#define PAX_DELTA_MMAP_LEN    16
++#define PAX_DELTA_STACK_LEN   16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+    but it's not easy, and we've already done it here.  */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index f08dda3..ea6aa1b 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+                       (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++      pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
+@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr)               ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x)                       do { } while (0)
+ #define pgd_populate(mm, pmd, pte)    BUG()
++#define pgd_populate_kernel(mm, pmd, pte)     BUG()
+ #endif
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index c2c43f7..b08ffd9 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -236,6 +236,17 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY       PAGE_EXECREAD
+ #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC   PAGE_SHARED
++# define PAGE_COPY_NOEXEC     PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL   __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC      __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX       __pgprot(_PAGE_KERNEL_RWX)
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 4828478..89b1fbe 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -221,17 +221,17 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                           const void __user *from,
+                                           unsigned long n)
+ {
+-        int sz = __compiletime_object_size(to);
++        size_t sz = __compiletime_object_size(to);
+         unsigned long ret = n;
+-        if (likely(sz == -1 || sz >= n))
++        if (likely(sz == (size_t)-1 || sz >= n))
+                 ret = __copy_from_user(to, from, n);
+         else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
+       else
+                 __bad_copy_user();
+-      if (unlikely(ret))
++      if (unlikely(ret && (long)ret > 0))
+               memset(to + (n - ret), 0, ret);
+         return ret;
+ }
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index a0ecdb4a..71d2069 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -100,14 +100,12 @@
+  * or init pieces the location is */
+ static inline int in_init(struct module *me, void *loc)
+ {
+-      return (loc >= me->init_layout.base &&
+-              loc <= (me->init_layout.base + me->init_layout.size));
++      within_module_init((unsigned long)loc, me);
+ }
+ static inline int in_core(struct module *me, void *loc)
+ {
+-      return (loc >= me->core_layout.base &&
+-              loc <= (me->core_layout.base + me->core_layout.size));
++      within_module_core((unsigned long)loc, me);
+ }
+ static inline int in_local(struct module *me, void *loc)
+@@ -367,13 +365,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+       }
+       /* align things a bit */
+-      me->core_layout.size = ALIGN(me->core_layout.size, 16);
+-      me->arch.got_offset = me->core_layout.size;
+-      me->core_layout.size += gots * sizeof(struct got_entry);
++      me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16);
++      me->arch.got_offset = me->core_layout.size_rw;
++      me->core_layout.size_rw += gots * sizeof(struct got_entry);
+-      me->core_layout.size = ALIGN(me->core_layout.size, 16);
+-      me->arch.fdesc_offset = me->core_layout.size;
+-      me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
++      me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16);
++      me->arch.fdesc_offset = me->core_layout.size_rw;
++      me->core_layout.size_rw += fdescs * sizeof(Elf_Fdesc);
+       me->arch.got_max = gots;
+       me->arch.fdesc_max = fdescs;
+@@ -391,7 +389,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+       BUG_ON(value == 0);
+-      got = me->core_layout.base + me->arch.got_offset;
++      got = me->core_layout.base_rw + me->arch.got_offset;
+       for (i = 0; got[i].addr; i++)
+               if (got[i].addr == value)
+                       goto out;
+@@ -409,7 +407,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+-      Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
++      Elf_Fdesc *fdesc = me->core_layout.base_rw + me->arch.fdesc_offset;
+       if (!value) {
+               printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -427,7 +425,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+       /* Create new one */
+       fdesc->addr = value;
+-      fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
++      fdesc->gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset;
+       return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -847,7 +845,7 @@ register_unwind_table(struct module *me,
+       table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+       end = table + sechdrs[me->arch.unwind_section].sh_size;
+-      gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
++      gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset;
+       DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+              me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 0a393a0..5b3199e0 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       unsigned long task_size = TASK_SIZE;
+       int do_color_align, last_mmap;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+       if (len > task_size)
+               return -ENOMEM;
+@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+               goto found_addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_color_align && last_mmap)
+                       addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       info.high_limit = mmap_upper_limit();
+       info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+       info.align_offset = shared_align_offset(last_mmap, pgoff);
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+ found_addr:
+@@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       unsigned long addr = addr0;
+       int do_color_align, last_mmap;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ #ifdef CONFIG_64BIT
+       /* This should only ever run for 32-bit processes.  */
+@@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       }
+       /* requesting a specific address */
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_color_align && last_mmap)
+                       addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+       info.align_offset = shared_align_offset(last_mmap, pgoff);
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               goto found_addr;
+@@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+       mm->mmap_legacy_base = mmap_legacy_base();
+       mm->mmap_base = mmap_upper_limit();
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP) {
++              mm->mmap_legacy_base += mm->delta_mmap;
++              mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++      }
++#endif
++
+       if (mmap_is_legacy()) {
+               mm->mmap_base = mm->mmap_legacy_base;
+               mm->get_unmapped_area = arch_get_unmapped_area;
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 97d6b20..2ab0232 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -719,9 +719,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+                       down_read(&current->mm->mmap_sem);
+                       vma = find_vma(current->mm,regs->iaoq[0]);
+-                      if (vma && (regs->iaoq[0] >= vma->vm_start)
+-                              && (vma->vm_flags & VM_EXEC)) {
+-
++                      if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+                               fault_address = regs->iaoq[0];
+                               fault_space = regs->iasq[0];
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 163af2c..ed77b14 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -16,6 +16,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/uaccess.h>
++#include <linux/unistd.h>
+ #include <asm/traps.h>
+@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+-      if (code == 6 || code == 16)
++      if (code == 6 || code == 7 || code == 16)
+           return VM_EXEC;
+       switch (inst & 0xf0000000) {
+@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+                       }
+ #endif
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when rt_sigreturn trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: unpatched PLT emulation */
++              unsigned int bl, depwi;
++
++              err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++              err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++              if (err)
++                      break;
++
++              if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++                      unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++                      err = get_user(ldw, (unsigned int *)addr);
++                      err |= get_user(bv, (unsigned int *)(addr+4));
++                      err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++                      if (err)
++                              break;
++
++                      if (ldw == 0x0E801096U &&
++                          bv == 0xEAC0C000U &&
++                          ldw2 == 0x0E881095U)
++                      {
++                              unsigned int resolver, map;
++
++                              err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++                              err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++                              if (err)
++                                      break;
++
++                              regs->gr[20] = instruction_pointer(regs)+8;
++                              regs->gr[21] = map;
++                              regs->gr[22] = resolver;
++                              regs->iaoq[0] = resolver | 3UL;
++                              regs->iaoq[1] = regs->iaoq[0] + 4;
++                              return 3;
++                      }
++              }
++      } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++      if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++              return 1;
++#endif
++
++      do { /* PaX: rt_sigreturn emulation */
++              unsigned int ldi1, ldi2, bel, nop;
++
++              err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++              err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++              err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++              err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++              if (err)
++                      break;
++
++              if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++                  ldi2 == 0x3414015AU &&
++                  bel == 0xE4008200U &&
++                  nop == 0x08000240U)
++              {
++                      regs->gr[25] = (ldi1 & 2) >> 1;
++                      regs->gr[20] = __NR_rt_sigreturn;
++                      regs->gr[31] = regs->iaoq[1] + 16;
++                      regs->sr[0] = regs->iasq[1];
++                      regs->iaoq[0] = 0x100UL;
++                      regs->iaoq[1] = regs->iaoq[0] + 4;
++                      regs->iasq[0] = regs->sr[2];
++                      regs->iasq[1] = regs->sr[2];
++                      return 2;
++              }
++      } while (0);
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+       const struct exception_table_entry *fix;
+@@ -230,8 +341,33 @@ retry:
+ good_area:
+-      if ((vma->vm_flags & acc_type) != acc_type)
++      if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++                  (address & ~3UL) == instruction_pointer(regs))
++              {
++                      up_read(&mm->mmap_sem);
++                      switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++                      case 3:
++                              return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++                      case 2:
++                              return;
++#endif
++
++                      }
++                      pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               goto bad_area;
++      }
+       /*
+        * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 792cb17..1a96a22 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -146,6 +146,7 @@ config PPC
+       select ARCH_USE_BUILTIN_BSWAP
+       select OLD_SIGSUSPEND
+       select OLD_SIGACTION if PPC32
++      select HAVE_GCC_PLUGINS
+       select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+@@ -446,6 +447,7 @@ config KEXEC
+       bool "kexec system call"
+       depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
+       select KEXEC_CORE
++      depends on !GRKERNSEC_KMEM
+       help
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index f08d567..94e5497 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -12,6 +12,11 @@
+ #define ATOMIC_INIT(i)                { (i) }
++#define _ASM_EXTABLE(from, to)                        \
++"     .section        __ex_table,\"a\"\n"     \
++      PPC_LONG"       " #from ", " #to"\n"    \
++"     .previous\n"
++
+ /*
+  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
+  * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
+@@ -39,38 +44,79 @@ static __inline__ int atomic_read(const atomic_t *v)
+       return t;
+ }
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      int t;
++
++      __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++      return t;
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+       __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+-#define ATOMIC_OP(op, asm_op)                                         \
+-static __inline__ void atomic_##op(int a, atomic_t *v)                        \
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##o.
++#define __OVERFLOW_PRE                        \
++      "       mcrxr   cr0\n"
++#define __OVERFLOW_POST                       \
++      "       bf 4*cr0+so, 3f\n"      \
++      "2:     .long 0x00c00b00\n"     \
++      "3:\n"
++#define __OVERFLOW_EXTABLE            \
++      "\n4:\n"                        \
++      _ASM_EXTABLE(2b, 4b)
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_PRE
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable)     \
++static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v)  \
+ {                                                                     \
+       int t;                                                          \
+                                                                       \
+       __asm__ __volatile__(                                           \
+-"1:   lwarx   %0,0,%3         # atomic_" #op "\n"                     \
++"1:   lwarx   %0,0,%3         # atomic_" #op #suffix "\n"             \
++      pre_op                                                          \
+       #asm_op " %0,%2,%0\n"                                           \
++      post_op                                                         \
+       PPC405_ERR77(0,%3)                                              \
+ "     stwcx.  %0,0,%3 \n"                                             \
+ "     bne-    1b\n"                                                   \
++      extable                                                         \
+       : "=&r" (t), "+m" (v->counter)                                  \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+ }                                                                     \
+-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)                          \
+-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)    \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , )         \
++                            __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline int atomic_##op##_return##suffix##_relaxed(int a, atomic##suffix##_t *v)\
+ {                                                                     \
+       int t;                                                          \
+                                                                       \
+       __asm__ __volatile__(                                           \
+-"1:   lwarx   %0,0,%3         # atomic_" #op "_return_relaxed\n"      \
++"1:   lwarx   %0,0,%2         # atomic_" #op "_return" #suffix "_relaxed\n"\
++      pre_op                                                          \
+       #asm_op " %0,%2,%0\n"                                           \
++      post_op                                                         \
+       PPC405_ERR77(0, %3)                                             \
+ "     stwcx.  %0,0,%3\n"                                              \
+ "     bne-    1b\n"                                                   \
++      extable                                                         \
+       : "=&r" (t), "+m" (v->counter)                                  \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+@@ -78,6 +124,9 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)  \
+       return t;                                                       \
+ }
++#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
++                                   __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_FETCH_OP_RELAXED(op, asm_op)                           \
+ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)     \
+ {                                                                     \
+@@ -105,6 +154,7 @@ ATOMIC_OPS(add, add)
+ ATOMIC_OPS(sub, subf)
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+ #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+@@ -126,41 +176,22 @@ ATOMIC_OPS(xor, xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP_RELAXED
+ #undef ATOMIC_OP_RETURN_RELAXED
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+ #define atomic_add_negative(a, v)     (atomic_add_return((a), (v)) < 0)
+-static __inline__ void atomic_inc(atomic_t *v)
+-{
+-      int t;
+-
+-      __asm__ __volatile__(
+-"1:   lwarx   %0,0,%2         # atomic_inc\n\
+-      addic   %0,%0,1\n"
+-      PPC405_ERR77(0,%2)
+-"     stwcx.  %0,0,%2 \n\
+-      bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-}
+-
+-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+-{
+-      int t;
+-
+-      __asm__ __volatile__(
+-"1:   lwarx   %0,0,%2         # atomic_inc_return_relaxed\n"
+-"     addic   %0,%0,1\n"
+-      PPC405_ERR77(0, %2)
+-"     stwcx.  %0,0,%2\n"
+-"     bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-
+-      return t;
+-}
++/*
++ * atomic_inc - increment atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Automatically increments @v by 1
++ */
++#define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v))
++#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_unchecked_relaxed(1, (v))
+ /*
+  * atomic_inc_and_test - increment and test
+@@ -171,37 +202,20 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+  * other cases.
+  */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+-
+-static __inline__ void atomic_dec(atomic_t *v)
+-{
+-      int t;
+-
+-      __asm__ __volatile__(
+-"1:   lwarx   %0,0,%2         # atomic_dec\n\
+-      addic   %0,%0,-1\n"
+-      PPC405_ERR77(0,%2)\
+-"     stwcx.  %0,0,%2\n\
+-      bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-}
+-
+-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
++#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
++
++/* 
++ * atomic_dec - decrement atomic variable
++ * @v: pointer of type atomic_t
++ * 
++ * Atomically decrements @v by 1
++ */
++#define atomic_dec(v) atomic_sub(1, (v))
++#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
++
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+ {
+-      int t;
+-
+-      __asm__ __volatile__(
+-"1:   lwarx   %0,0,%2         # atomic_dec_return_relaxed\n"
+-"     addic   %0,%0,-1\n"
+-      PPC405_ERR77(0, %2)
+-"     stwcx.  %0,0,%2\n"
+-"     bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-
+-      return t;
++      atomic_sub_unchecked(1, v);
+ }
+ #define atomic_inc_return_relaxed atomic_inc_return_relaxed
+@@ -216,6 +230,16 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+ #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++      return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) 
++{
++      return xchg(&(v->counter), new);
++}
++
+ /**
+  * __atomic_add_unless - add unless the number is a given value
+  * @v: pointer of type atomic_t
+@@ -233,14 +257,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+       PPC_ATOMIC_ENTRY_BARRIER
+ "1:   lwarx   %0,0,%1         # __atomic_add_unless\n\
+       cmpw    0,%0,%3 \n\
+-      beq-    2f \n\
+-      add     %0,%2,%0 \n"
++      beq-    5f \n"
++
++      __OVERFLOW_PRE
++      __REFCOUNT_OP(add) "    %0,%2,%0 \n"
++      __OVERFLOW_POST
++
+       PPC405_ERR77(0,%2)
+ "     stwcx.  %0,0,%1 \n\
+       bne-    1b \n"
++
++      __OVERFLOW_EXTABLE
++
+       PPC_ATOMIC_EXIT_BARRIER
+ "     subf    %0,%2,%0 \n\
+-2:"
++5:"
+       : "=&r" (t)
+       : "r" (&v->counter), "r" (a), "r" (u)
+       : "cc", "memory");
+@@ -323,37 +354,59 @@ static __inline__ long atomic64_read(const atomic64_t *v)
+       return t;
+ }
++static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      long t;
++
++      __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++      return t;
++}
++
+ static __inline__ void atomic64_set(atomic64_t *v, long i)
+ {
+       __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+-#define ATOMIC64_OP(op, asm_op)                                               \
+-static __inline__ void atomic64_##op(long a, atomic64_t *v)           \
++static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++      __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable)   \
++static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
+ {                                                                     \
+       long t;                                                         \
+                                                                       \
+       __asm__ __volatile__(                                           \
+ "1:   ldarx   %0,0,%3         # atomic64_" #op "\n"                   \
++      pre_op                                                          \
+       #asm_op " %0,%2,%0\n"                                           \
++      post_op                                                         \
+ "     stdcx.  %0,0,%3 \n"                                             \
+ "     bne-    1b\n"                                                   \
++      extable                                                         \
+       : "=&r" (t), "+m" (v->counter)                                  \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+ }
+-#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                                \
+-static inline long                                                    \
+-atomic64_##op##_return_relaxed(long a, atomic64_t *v)                 \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , )             \
++                              __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline long atomic64_##op##_return##suffix##_relaxed(long a, atomic64##suffix##_t *v)\
+ {                                                                     \
+       long t;                                                         \
+                                                                       \
+       __asm__ __volatile__(                                           \
+ "1:   ldarx   %0,0,%3         # atomic64_" #op "_return_relaxed\n"    \
++      pre_op                                                          \
+       #asm_op " %0,%2,%0\n"                                           \
++      post_op                                                         \
+ "     stdcx.  %0,0,%3\n"                                              \
+ "     bne-    1b\n"                                                   \
++      extable                                                         \
+       : "=&r" (t), "+m" (v->counter)                                  \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+@@ -361,6 +414,9 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v)                      \
+       return t;                                                       \
+ }
++#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
++                                     __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)                         \
+ static inline long                                                    \
+ atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)                  \
+@@ -409,38 +465,33 @@ ATOMIC64_OPS(xor, xor)
+ #undef ATOPIC64_OPS
+ #undef ATOMIC64_FETCH_OP_RELAXED
+ #undef ATOMIC64_OP_RETURN_RELAXED
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
++#undef __OVERFLOW_PRE
++#undef __REFCOUNT_OP
+ #define atomic64_add_negative(a, v)   (atomic64_add_return((a), (v)) < 0)
+-static __inline__ void atomic64_inc(atomic64_t *v)
+-{
+-      long t;
++/*
++ * atomic64_inc - increment atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Automatically increments @v by 1
++ */
++#define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+-      __asm__ __volatile__(
+-"1:   ldarx   %0,0,%2         # atomic64_inc\n\
+-      addic   %0,%0,1\n\
+-      stdcx.  %0,0,%2 \n\
+-      bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++      atomic64_add_unchecked(1, v);
+ }
+-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
++static inline long atomic64_inc_return_unchecked_relaxed(atomic64_unchecked_t *v)
+ {
+-      long t;
+-
+-      __asm__ __volatile__(
+-"1:   ldarx   %0,0,%2         # atomic64_inc_return_relaxed\n"
+-"     addic   %0,%0,1\n"
+-"     stdcx.  %0,0,%2\n"
+-"     bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-
+-      return t;
++      return atomic64_add_return_unchecked_relaxed(1, v);
+ }
+ /*
+@@ -453,34 +504,18 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+  */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+-static __inline__ void atomic64_dec(atomic64_t *v)
++/* 
++ * atomic64_dec - decrement atomic variable
++ * @v: pointer of type atomic64_t
++ * 
++ * Atomically decrements @v by 1
++ */
++#define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
++
++static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+ {
+-      long t;
+-
+-      __asm__ __volatile__(
+-"1:   ldarx   %0,0,%2         # atomic64_dec\n\
+-      addic   %0,%0,-1\n\
+-      stdcx.  %0,0,%2\n\
+-      bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-}
+-
+-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+-{
+-      long t;
+-
+-      __asm__ __volatile__(
+-"1:   ldarx   %0,0,%2         # atomic64_dec_return_relaxed\n"
+-"     addic   %0,%0,-1\n"
+-"     stdcx.  %0,0,%2\n"
+-"     bne-    1b"
+-      : "=&r" (t), "+m" (v->counter)
+-      : "r" (&v->counter)
+-      : "cc", "xer");
+-
+-      return t;
++      atomic64_sub_unchecked(1, v);
+ }
+ #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+@@ -522,6 +557,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+ #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++      return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++      return xchg(&(v->counter), new);
++}
++
+ /**
+  * atomic64_add_unless - add unless the number is a given value
+  * @v: pointer of type atomic64_t
+@@ -537,15 +582,22 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+       __asm__ __volatile__ (
+       PPC_ATOMIC_ENTRY_BARRIER
+-"1:   ldarx   %0,0,%1         # __atomic_add_unless\n\
++"1:   ldarx   %0,0,%1         # atomic64_add_unless\n\
+       cmpd    0,%0,%3 \n\
+-      beq-    2f \n\
+-      add     %0,%2,%0 \n"
++      beq-    5f \n"
++
++      __OVERFLOW_PRE
++      __REFCOUNT_OP(add) "    %0,%2,%0 \n"
++      __OVERFLOW_POST
++
+ "     stdcx.  %0,0,%1 \n\
+       bne-    1b \n"
+       PPC_ATOMIC_EXIT_BARRIER
++
++      __OVERFLOW_EXTABLE
++
+ "     subf    %0,%2,%0 \n\
+-2:"
++5:"
+       : "=&r" (t)
+       : "r" (&v->counter), "r" (a), "r" (u)
+       : "cc", "memory");
+diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
+index 880db13..bb4ed4a 100644
+--- a/arch/powerpc/include/asm/book3s/32/hash.h
++++ b/arch/powerpc/include/asm/book3s/32/hash.h
+@@ -20,6 +20,7 @@
+ #define _PAGE_HASHPTE 0x002   /* hash_page has made an HPTE for this pte */
+ #define _PAGE_USER    0x004   /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008   /* G: prohibit speculative access */
++#define _PAGE_NX      _PAGE_GUARDED
+ #define _PAGE_COHERENT        0x010   /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE        0x020   /* I: cache inhibit */
+ #define _PAGE_WRITETHRU       0x040   /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 38b33dc..945d1f1 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -226,7 +226,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+ {
+       unsigned long set = pte_val(entry) &
+-              (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
++              (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | _PAGE_NX);
+       unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+       pte_update(ptep, clr, set);
+diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+index cd5e7aa..7709061 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
++++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+@@ -91,6 +91,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+       pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
+ }
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++      pgd_populate(mm, pgd, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+@@ -106,6 +111,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+       pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+ }
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate_kernel(mm, pud, pmd);
++}
++
+ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+                                   unsigned long address)
+ {
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index ffbafbf..71d037fb 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,8 @@
+ #ifdef __KERNEL__
++#include <asm/reg.h>
++#include <linux/const.h>
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -22,7 +24,7 @@
+ #define L1_CACHE_SHIFT                7
+ #endif
+-#define       L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
++#define       L1_CACHE_BYTES          (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define       SMP_CACHE_BYTES         L1_CACHE_BYTES
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index ee46ffe..b36c98c 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -30,6 +30,18 @@
+ #define ELF_ET_DYN_BASE       0x20000000
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN    (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN   (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN    15
++#define PAX_DELTA_STACK_LEN   15
++#endif
++#endif
++
+ #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+ /*
+diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
+index 8196e9c..d83a9f3 100644
+--- a/arch/powerpc/include/asm/exec.h
++++ b/arch/powerpc/include/asm/exec.h
+@@ -4,6 +4,6 @@
+ #ifndef _ASM_POWERPC_EXEC_H
+ #define _ASM_POWERPC_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ #endif /* _ASM_POWERPC_EXEC_H */
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index 5acabbd..7ea14fa 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -10,7 +10,7 @@
+  * 2 of the License, or (at your option) any later version.
+  */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+ #endif        /* __KERNEL__ */
+ #endif        /* _ASM_POWERPC_KMAP_TYPES_H */
+diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
+index b8da913..c02b593 100644
+--- a/arch/powerpc/include/asm/local.h
++++ b/arch/powerpc/include/asm/local.h
+@@ -9,21 +9,65 @@ typedef struct
+       atomic_long_t a;
+ } local_t;
++typedef struct
++{
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i)        atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l,i)      atomic_long_set_unchecked(&(l)->a, (i))
+ #define local_add(i,l)        atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l)      atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l)        atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l)      atomic_long_sub_unchecked((i),(&(l)->a))
+ #define local_inc(l)  atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l)        atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l)  atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l)        atomic_long_dec_unchecked(&(l)->a)
+ static __inline__ long local_add_return(long a, local_t *l)
+ {
+       long t;
+       __asm__ __volatile__(
++"1:"  PPC_LLARX(%0,0,%2,0) "                  # local_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     mcrxr   cr0\n"
++"     addo.   %0,%1,%0\n"
++"     bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++"     add     %0,%1,%0\n"
++#endif
++
++"3:\n"
++      PPC405_ERR77(0,%2)
++      PPC_STLCX       "%0,0,%2 \n\
++      bne-    1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
++      : "=&r" (t)
++      : "r" (a), "r" (&(l->a.counter))
++      : "cc", "memory");
++
++      return t;
++}
++
++static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
++{
++      long t;
++
++      __asm__ __volatile__(
+ "1:"  PPC_LLARX(%0,0,%2,0) "                  # local_add_return\n\
+       add     %0,%1,%0\n"
+       PPC405_ERR77(0,%2)
+@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
+ #define local_cmpxchg(l, o, n) \
+       (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++      (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+ /**
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index 30922f6..0bb237c 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -26,7 +26,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ }
+ #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+       return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
+index 897d2e1..399f34f 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
++++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
+@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+ #define pgd_populate(MM, PGD, PUD)    pgd_set(PGD, (unsigned long)PUD)
++#define pgd_populate_kernel(MM, PGD, PUD)     pgd_populate((MM), (PGD), (PUD))
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+       pud_set(pud, (unsigned long)pmd);
+ }
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      pud_populate(mm, pud, pmd);
++}
++
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+ {
+@@ -139,6 +145,7 @@ extern void __tlb_remove_table(void *_table);
+ #endif
+ #define pud_populate(mm, pud, pmd)    pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd)     pud_populate((mm), (pud), (pmd))
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 56398e7..287a772 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
+  * and needs to be executable.  This means the whole heap ends
+  * up being executable.
+  */
+-#define VM_DATA_DEFAULT_FLAGS32       (VM_READ | VM_WRITE | VM_EXEC | \
+-                               VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++      (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+ #define VM_DATA_DEFAULT_FLAGS64       (VM_READ | VM_WRITE | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
+ #define is_kernel_addr(x)     ((x) >= PAGE_OFFSET)
+ #endif
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ #ifndef CONFIG_PPC_BOOK3S_64
+ /*
+  * Use the top bit of the higher-level page table entries to indicate whether
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index dd5f071..0470718 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -169,15 +169,18 @@ do {                                             \
+  * stack by default, so in the absence of a PT_GNU_STACK program header
+  * we turn execute permission off.
+  */
+-#define VM_STACK_DEFAULT_FLAGS32      (VM_READ | VM_WRITE | VM_EXEC | \
+-                                       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++      (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+ #define VM_STACK_DEFAULT_FLAGS64      (VM_READ | VM_WRITE | \
+                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+       (is_32bit_task() ? \
+        VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+ #include <asm-generic/getorder.h>
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 9bd87f2..f600e6d 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_POWERPC_PGTABLE_H
+ #define _ASM_POWERPC_PGTABLE_H
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <linux/mmdebug.h>
+ #include <linux/mmzone.h>
+diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
+index 4ba26dd..2d1137d 100644
+--- a/arch/powerpc/include/asm/pte-common.h
++++ b/arch/powerpc/include/asm/pte-common.h
+@@ -16,6 +16,9 @@
+ #ifndef _PAGE_EXEC
+ #define _PAGE_EXEC    0
+ #endif
++#ifndef _PAGE_NX
++#define _PAGE_NX      0
++#endif
+ #ifndef _PAGE_ENDIAN
+ #define _PAGE_ENDIAN  0
+ #endif
+@@ -53,13 +56,13 @@
+ #define PMD_PAGE_SIZE(pmd)    bad_call_to_PMD_PAGE_SIZE()
+ #endif
+ #ifndef _PAGE_KERNEL_RO
+-#define _PAGE_KERNEL_RO               (_PAGE_RO)
++#define _PAGE_KERNEL_RO               (_PAGE_RO | _PAGE_NX)
+ #endif
+ #ifndef _PAGE_KERNEL_ROX
+ #define _PAGE_KERNEL_ROX      (_PAGE_EXEC | _PAGE_RO)
+ #endif
+ #ifndef _PAGE_KERNEL_RW
+-#define _PAGE_KERNEL_RW               (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
++#define _PAGE_KERNEL_RW               (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_NX)
+ #endif
+ #ifndef _PAGE_KERNEL_RWX
+ #define _PAGE_KERNEL_RWX      (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+@@ -142,15 +145,12 @@ static inline bool pte_user(pte_t pte)
+  * Note due to the way vm flags are laid out, the bits are XWR
+  */
+ #define PAGE_NONE     __pgprot(_PAGE_BASE)
+-#define PAGE_SHARED   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+-                               _PAGE_EXEC)
+-#define PAGE_COPY     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+-#define PAGE_COPY_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+-                               _PAGE_EXEC)
+-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+-#define PAGE_READONLY_X       __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+-                               _PAGE_EXEC)
++#define PAGE_SHARED   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_NX)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_COPY     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX)
++#define PAGE_COPY_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC)
++#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX)
++#define PAGE_READONLY_X       __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC)
+ #define __P000        PAGE_NONE
+ #define __P001        PAGE_READONLY
+@@ -171,11 +171,9 @@ static inline bool pte_user(pte_t pte)
+ #define __S111        PAGE_SHARED_X
+ /* Permission masks used for kernel mappings */
+-#define PAGE_KERNEL   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+-#define PAGE_KERNEL_NC        __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+-                               _PAGE_NO_CACHE)
+-#define PAGE_KERNEL_NCG       __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+-                               _PAGE_NO_CACHE | _PAGE_GUARDED)
++#define PAGE_KERNEL   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_NX)
++#define PAGE_KERNEL_NC        __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
++#define PAGE_KERNEL_NCG       __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
+ #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+ #define PAGE_KERNEL_RO        __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+ #define PAGE_KERNEL_ROX       __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 978dada..5d29335 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -270,6 +270,7 @@
+ #define SPRN_DBCR     0x136   /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR    0x012   /* Data Storage Interrupt Status Register */
+ #define   DSISR_NOHPTE                0x40000000      /* no translation found */
++#define   DSISR_GUARDED               0x10000000      /* fetch from guarded storage */
+ #define   DSISR_PROTFAULT     0x08000000      /* protection fault */
+ #define   DSISR_ISSTORE               0x02000000      /* access was a store */
+ #define   DSISR_DABRMATCH     0x00400000      /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 0d02c11..33a8f08 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -51,7 +51,7 @@ struct smp_ops_t {
+       int   (*cpu_disable)(void);
+       void  (*cpu_die)(unsigned int nr);
+       int   (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index fa37fe9..867d3cf 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -27,6 +27,7 @@
+ #include <asm/asm-compat.h>
+ #include <asm/synch.h>
+ #include <asm/ppc-opcode.h>
++#include <asm/atomic.h>
+ #ifdef CONFIG_PPC64
+ /* use 0x800000yy when locked, where yy == CPU number */
+@@ -228,13 +229,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
+       __asm__ __volatile__(
+ "1:   " PPC_LWARX(%0,0,%1,1) "\n"
+       __DO_SIGN_EXTEND
+-"     addic.          %0,%0,1\n\
+-      ble-            2f\n"
++
++#ifdef        CONFIG_PAX_REFCOUNT
++"     mcrxr   cr0\n"
++"     addico.         %0,%0,1\n"
++"     bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++"     addic.          %0,%0,1\n"
++#endif
++
++"3:\n"
++      "ble-           4f\n"
+       PPC405_ERR77(0,%1)
+ "     stwcx.          %0,0,%1\n\
+       bne-            1b\n"
+       PPC_ACQUIRE_BARRIER
+-"2:"  : "=&r" (tmp)
++"4:"  
++
++#ifdef CONFIG_PAX_REFCOUNT
++      _ASM_EXTABLE(2b,4b)
++#endif
++
++      : "=&r" (tmp)
+       : "r" (&rw->lock)
+       : "cr0", "xer", "memory");
+@@ -310,11 +327,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+       __asm__ __volatile__(
+       "# read_unlock\n\t"
+       PPC_RELEASE_BARRIER
+-"1:   lwarx           %0,0,%1\n\
+-      addic           %0,%0,-1\n"
++"1:   lwarx           %0,0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     mcrxr   cr0\n"
++"     addico.         %0,%0,-1\n"
++"     bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++"     addic.          %0,%0,-1\n"
++#endif
++
++"3:\n"
+       PPC405_ERR77(0,%1)
+ "     stwcx.          %0,0,%1\n\
+       bne-            1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++      _ASM_EXTABLE(2b, 4b)
++#endif
++
+       : "=&r"(tmp)
+       : "r"(&rw->lock)
+       : "cr0", "xer", "memory");
+diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
+index da3cdff..c774844 100644
+--- a/arch/powerpc/include/asm/string.h
++++ b/arch/powerpc/include/asm/string.h
+@@ -11,17 +11,17 @@
+ #define __HAVE_ARCH_MEMCMP
+ #define __HAVE_ARCH_MEMCHR
+-extern char * strcpy(char *,const char *);
+-extern char * strncpy(char *,const char *, __kernel_size_t);
+-extern __kernel_size_t strlen(const char *);
+-extern int strcmp(const char *,const char *);
+-extern int strncmp(const char *, const char *, __kernel_size_t);
+-extern char * strcat(char *, const char *);
++extern char * strcpy(char *,const char *) __nocapture(2);
++extern char * strncpy(char *,const char *, __kernel_size_t) __nocapture(2);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
++extern int strcmp(const char *,const char *) __nocapture();
++extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2);
++extern char * strcat(char *, const char *) __nocapture(2);
+ extern void * memset(void *,int,__kernel_size_t);
+-extern void * memcpy(void *,const void *,__kernel_size_t);
+-extern void * memmove(void *,const void *,__kernel_size_t);
+-extern int memcmp(const void *,const void *,__kernel_size_t);
+-extern void * memchr(const void *,int,__kernel_size_t);
++extern void * memcpy(void *,const void *,__kernel_size_t) __nocapture(2);
++extern void * memmove(void *,const void *,__kernel_size_t) __nocapture(2);
++extern int memcmp(const void *,const void *,__kernel_size_t) __nocapture(1, 2);
++extern void * memchr(const void *,int,__kernel_size_t) __nocapture(1);
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 87e4b2d..c362390 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI           18      /* function descriptors must die! */
+ #endif
++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
++#define TIF_GRSEC_SETXID      6       /* update credentials on syscall entry/exit */
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE    (1<<TIF_SYSCALL_TRACE)
+@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE      (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ             (1<<TIF_NOHZ)
++#define _TIF_GRSEC_SETXID     (1<<TIF_GRSEC_SETXID)
+ #define _TIF_SYSCALL_DOTRACE  (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+-                               _TIF_NOHZ)
++                               _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ #define _TIF_USER_WORK_MASK   (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+                                _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index c266227..f3dc6bb 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -58,6 +58,7 @@
+ #endif
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size)           \
+       (__chk_user_ptr(addr),                  \
+        __access_ok((__force unsigned long)(addr), (size), get_fs()))
+@@ -303,43 +304,6 @@ do {                                                              \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+               const void __user *from, unsigned long size);
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+-              const void __user *from, unsigned long n)
+-{
+-      if (likely(access_ok(VERIFY_READ, from, n))) {
+-              check_object_size(to, n, false);
+-              return __copy_tofrom_user((__force void __user *)to, from, n);
+-      }
+-      memset(to, 0, n);
+-      return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+-              const void *from, unsigned long n)
+-{
+-      if (access_ok(VERIFY_WRITE, to, n)) {
+-              check_object_size(from, n, true);
+-              return __copy_tofrom_user(to, (__force void __user *)from, n);
+-      }
+-      return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+-      __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+-                                  unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+-                                unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+-                                unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+               const void __user *from, unsigned long n)
+ {
+@@ -412,6 +376,70 @@ static inline unsigned long __copy_to_user(void __user *to,
+       return __copy_to_user_inatomic(to, from, size);
+ }
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++              const void __user *from, unsigned long n)
++{
++      if ((long)n < 0)
++              return n;
++
++      if (likely(access_ok(VERIFY_READ, from, n))) {
++              check_object_size(to, n, false);
++              return __copy_tofrom_user((void __force_user *)to, from, n);
++      }
++      memset(to, 0, n);
++      return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++              const void *from, unsigned long n)
++{
++      if ((long)n < 0)
++              return n;
++
++      if (likely(access_ok(VERIFY_WRITE, to, n))) {
++              check_object_size(from, n, true);
++              return __copy_tofrom_user(to, (void __force_user *)from, n);
++      }
++      return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++      __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++      if ((long)n < 0 || n > INT_MAX)
++              return n;
++
++      if (likely(access_ok(VERIFY_READ, from, n))) {
++              check_object_size(to, n, false);
++              n = __copy_from_user(to, from, n);
++      } else
++              memset(to, 0, n);
++      return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++      if ((long)n < 0 || n > INT_MAX)
++              return n;
++
++      if (likely(access_ok(VERIFY_WRITE, to, n))) {
++              check_object_size(from, n, true);
++              n = __copy_to_user(to, from, n);
++      }
++      return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++                                unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index fe4c075..fcb4600 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -14,6 +14,11 @@ CFLAGS_prom_init.o      += -fPIC
+ CFLAGS_btext.o                += -fPIC
+ endif
++CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code
+ CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+@@ -26,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ endif
++CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++
+ obj-y                         := cputable.o ptrace.o syscalls.o \
+                                  irq.o align.o signal_32.o pmc.o vdso.o \
+                                  process.o systbl.o idle.o \
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 38a1f96..ed94e42 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1010,6 +1010,7 @@ storage_fault_common:
+       std     r14,_DAR(r1)
+       std     r15,_DSISR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
++      bl      save_nvgprs
+       mr      r4,r14
+       mr      r5,r15
+       ld      r14,PACA_EXGEN+EX_R14(r13)
+@@ -1018,8 +1019,7 @@ storage_fault_common:
+       cmpdi   r3,0
+       bne-    1f
+       b       ret_from_except_lite
+-1:    bl      save_nvgprs
+-      mr      r5,r3
++1:    mr      r5,r3
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ld      r4,_DAR(r1)
+       bl      bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index bffec73..9cc5a35 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1520,10 +1520,10 @@ handle_page_fault:
+ 11:   ld      r4,_DAR(r1)
+       ld      r5,_DSISR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
++      bl      save_nvgprs
+       bl      do_page_fault
+       cmpdi   r3,0
+       beq+    12f
+-      bl      save_nvgprs
+       mr      r5,r3
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       lwz     r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 08887cf..0c98725 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -477,6 +477,8 @@ void migrate_irqs(void)
+ }
+ #endif
++extern void gr_handle_kernel_exploit(void);
++
+ static inline void check_stack_overflow(void)
+ {
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+@@ -489,6 +491,7 @@ static inline void check_stack_overflow(void)
+               pr_err("do_IRQ: stack overflow: %ld\n",
+                       sp - sizeof(struct thread_info));
+               dump_stack();
++              gr_handle_kernel_exploit();
+       }
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 5a7a78f..c0e4207 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+                       me->arch.core_plt_section = i;
+       }
+       if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+-              pr_err("Module doesn't contain .plt or .init.plt sections.\n");
++              pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+               return -ENOEXEC;
+       }
+@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
+       pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+       /* Init, or core PLT? */
+-      if (location >= mod->core_layout.base
+-          && location < mod->core_layout.base + mod->core_layout.size)
++      if ((location >= mod->core_layout.base_rx && location < mod->core_layout.base_rx + mod->core_layout.size_rx) ||
++          (location >= mod->core_layout.base_rw && location < mod->core_layout.base_rw + mod->core_layout.size_rw))
+               entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+-      else
++      else if ((location >= mod->init_layout.base_rx && location < mod->init_layout.base_rx + mod->init_layout.size_rx) ||
++               (location >= mod->init_layout.base_rw && location < mod->init_layout.base_rw + mod->init_layout.size_rw))
+               entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++      else {
++              printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++              return ~0UL;
++      }
+       /* Find this entry, or if that fails, the next avail. entry */
+       while (entry->jump[0]) {
+@@ -301,7 +306,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
+ {
+-      module->arch.tramp = do_plt_call(module->core_layout.base,
++      module->arch.tramp = do_plt_call(module->core_layout.base_rx,
+                                        (unsigned long)ftrace_caller,
+                                        sechdrs, module);
+       if (!module->arch.tramp)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index ad37aa1..51da6c4 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1360,8 +1360,8 @@ void show_regs(struct pt_regs * regs)
+        * Lookup NIP late so we have the best change of getting the
+        * above info out without failing
+        */
+-      printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+-      printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++      printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++      printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+       show_stack(current, (unsigned long *) regs->gpr[1]);
+       if (!user_mode(regs))
+@@ -1882,10 +1882,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+               newsp = stack[0];
+               ip = stack[STACK_FRAME_LR_SAVE];
+               if (!firstframe || ip != lr) {
+-                      printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++                      printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+                       if ((ip == rth) && curr_frame >= 0) {
+-                              printk(" (%pS)",
++                              printk(" (%pA)",
+                                      (void *)current->ret_stack[curr_frame].ret);
+                               curr_frame--;
+                       }
+@@ -1905,7 +1905,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+                       struct pt_regs *regs = (struct pt_regs *)
+                               (sp + STACK_FRAME_OVERHEAD);
+                       lr = regs->link;
+-                      printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
++                      printk("--- interrupt: %lx at %pA\n    LR = %pA\n",
+                              regs->trap, (void *)regs->nip, (void *)lr);
+                       firstframe = 1;
+               }
+@@ -1942,13 +1942,6 @@ void notrace __ppc64_runlatch_off(void)
+ }
+ #endif /* CONFIG_PPC64 */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() & ~PAGE_MASK;
+-      return sp & ~0xf;
+-}
+-
+ static inline unsigned long brk_rnd(void)
+ {
+         unsigned long rnd = 0;
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index bf91658..edd21f8 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -3312,6 +3312,10 @@ static int do_seccomp(struct pt_regs *regs)
+ static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+ #endif /* CONFIG_SECCOMP */
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /**
+  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+  * @regs: the pt_regs of the task to trace (current)
+@@ -3335,6 +3339,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+       user_exit();
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       /*
+        * The tracer may decide to abort the syscall, if so tracehook
+        * will return !0. Note that the tracer may also just change
+@@ -3353,6 +3362,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+       if (regs->gpr[0] >= NR_syscalls)
+               goto skip;
++
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_enter(regs, regs->gpr[0]);
+@@ -3384,6 +3394,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+       int step;
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       audit_syscall_exit(regs);
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index a7daf74..d8159e5 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1000,7 +1000,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+       /* Save user registers on the stack */
+       frame = &rt_sf->uc.uc_mcontext;
+       addr = frame;
+-      if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++      if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+               sigret = 0;
+               tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+       } else {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 70409bb..6cc6990 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -770,7 +770,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
+       current->thread.fp_state.fpscr = 0;
+       /* Set up to return from userspace. */
+-      if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++      if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+               regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+       } else {
+               err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 62859eb..035955d 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -37,6 +37,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/ratelimit.h>
+ #include <linux/context_tracking.h>
++#include <linux/uaccess.h>
+ #include <asm/emulated_ops.h>
+ #include <asm/pgtable.h>
+@@ -145,6 +146,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
+       return flags;
+ }
++extern void gr_handle_kernel_exploit(void);
++
+ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+                              int signr)
+ {
+@@ -194,6 +197,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
++
++      gr_handle_kernel_exploit();
++
+       do_exit(signr);
+ }
+@@ -1145,6 +1151,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+       enum ctx_state prev_state = exception_enter();
+       unsigned int reason = get_reason(regs);
++#ifdef CONFIG_PAX_REFCOUNT
++      unsigned int bkpt;
++      const struct exception_table_entry *entry;
++
++      if (reason & REASON_ILLEGAL) {
++              /* Check if PaX bad instruction */
++              if (!probe_kernel_address((const void *)regs->nip, bkpt) && bkpt == 0xc00b00) {
++                      current->thread.trap_nr = 0;
++                      pax_report_refcount_error(regs, NULL);
++                      /* fixup_exception() for PowerPC does not exist, simulate its job */
++                      if ((entry = search_exception_tables(regs->nip)) != NULL) {
++                              regs->nip = entry->fixup;
++                              return;
++                      }
++                      /* fixup_exception() could not handle */
++                      goto bail;
++              }
++      }
++#endif
++
+       /* We can now get here via a FP Unavailable exception if the core
+        * has no FPU, in that case the reason flags will be 0 */
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index 4111d30..fa5e7be 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -35,6 +35,7 @@
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
+ #include <asm/setup.h>
++#include <asm/mman.h>
+ #undef DEBUG
+@@ -180,7 +181,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+       vdso_base = VDSO32_MBASE;
+ #endif
+-      current->mm->context.vdso_base = 0;
++      current->mm->context.vdso_base = ~0UL;
+       /* vDSO has a problem and was disabled, just don't "enable" it for the
+        * process
+@@ -201,7 +202,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+       vdso_base = get_unmapped_area(NULL, vdso_base,
+                                     (vdso_pages << PAGE_SHIFT) +
+                                     ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+-                                    0, 0);
++                                    0, MAP_PRIVATE | MAP_EXECUTABLE);
+       if (IS_ERR_VALUE(vdso_base)) {
+               rc = vdso_base;
+               goto fail_mmapsem;
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+-      if (likely(access_ok(VERIFY_READ, from, n)))
+-              n = __copy_from_user(to, from, n);
+-      else
+-              memset(to, 0, n);
+-      return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+-      if (likely(access_ok(VERIFY_WRITE, to, n)))
+-              n = __copy_to_user(to, from, n);
+-      return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+                          unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+       return n;
+ }
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index bb1ffc5..9ae5cb6 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -34,6 +34,10 @@
+ #include <linux/context_tracking.h>
+ #include <linux/hugetlb.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 5; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int __user *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ /*
+  * Check whether the instruction at regs->nip is a store using
+  * an update addressing form which will update r1.
+@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+        * indicate errors in DSISR but can validly be set in SRR1.
+        */
+       if (trap == 0x400)
+-              error_code &= 0x48200000;
++              error_code &= 0x58200000;
+       else
+               is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -384,12 +415,16 @@ good_area:
+          * "undefined".  Of those that can be set, this is the only
+          * one which seems bad.
+          */
+-      if (error_code & 0x10000000)
++      if (error_code & DSISR_GUARDED)
+                 /* Guarded storage error. */
+               goto bad_area;
+ #endif /* CONFIG_8xx */
+       if (is_exec) {
++#ifdef CONFIG_PPC_STD_MMU
++              if (error_code & DSISR_GUARDED)
++                      goto bad_area;
++#endif
+               /*
+                * Allow execution from readable areas if the MMU does not
+                * provide separate controls over reading and executing.
+@@ -484,6 +519,23 @@ bad_area:
+ bad_area_nosemaphore:
+       /* User mode accesses cause a SIGSEGV */
+       if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++                      if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++                      if (is_exec && regs->nip == address) {
++#endif
++                              switch (pax_handle_fetch_fault(regs)) {
++                              }
++
++                              pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++                              do_group_exit(SIGKILL);
++                      }
++              }
++#endif
++
+               _exception(SIGSEGV, regs, code, address);
+               goto bail;
+       }
+diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
+index 2f1e443..de888bf 100644
+--- a/arch/powerpc/mm/mmap.c
++++ b/arch/powerpc/mm/mmap.c
+@@ -194,6 +194,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+@@ -205,9 +209,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+        */
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
+ }
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 2b27458..7c7c59b 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+       if ((mm->task_size - len) < addr)
+               return 0;
+       vma = find_vma(mm, addr);
+-      return (!vma || (addr + len) <= vma->vm_start);
++      return check_heap_stack_gap(vma, addr, len, 0);
+ }
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -276,6 +276,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
+       info.align_offset = 0;
+       addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              addr += mm->delta_mmap;
++#endif
++
+       while (addr < TASK_SIZE) {
+               info.low_limit = addr;
+               if (!slice_scan_available(addr, available, 1, &addr))
+@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+       if (fixed && addr > (mm->task_size - len))
+               return -ENOMEM;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++              addr = 0;
++#endif
++
+       /* If hint, make sure it matches our alignment restrictions */
+       if (!fixed && addr) {
+               addr = _ALIGN_UP(addr, 1ul << pshift);
+@@ -555,10 +566,10 @@ unsigned long arch_get_unmapped_area(struct file *filp,
+ }
+ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+-                                           const unsigned long addr0,
+-                                           const unsigned long len,
+-                                           const unsigned long pgoff,
+-                                           const unsigned long flags)
++                                           unsigned long addr0,
++                                           unsigned long len,
++                                           unsigned long pgoff,
++                                           unsigned long flags)
+ {
+       return slice_get_unmapped_area(addr0, len, flags,
+                                      current->mm->context.user_psize, 1);
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index 0625446..139a0aa 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -263,9 +263,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+       return VM_FAULT_NOPAGE;
+ }
+-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
+                               unsigned long address,
+-                              void *buf, int len, int write)
++                              void *buf, size_t len, int write)
+ {
+       struct spu_context *ctx = vma->vm_file->private_data;
+       unsigned long offset = address - vma->vm_start;
+diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
+index 26c5d5be..a308c28 100644
+--- a/arch/s390/Kconfig.debug
++++ b/arch/s390/Kconfig.debug
+@@ -9,6 +9,7 @@ config S390_PTDUMP
+       bool "Export kernel pagetable layout to userspace via debugfs"
+       depends on DEBUG_KERNEL
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       ---help---
+         Say Y here if you want to show the kernel pagetable layout in a
+         debugfs file. This information is only useful for kernel developers
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index d28cc2f..a937312 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -342,4 +342,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_dec_and_test(_v)     (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1, 0)
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __ARCH_S390_ATOMIC__  */
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 05219a5..032f5f0 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -9,8 +9,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+-#define L1_CACHE_BYTES     256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT     8
++#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD      32
+ #define __read_mostly __section(.data..read_mostly)
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 1736c7d..261351c 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -167,6 +167,13 @@ extern unsigned int vdso_enabled;
+                               (STACK_TOP / 3 * 2) : \
+                               (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN    (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN   (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports. */
+diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
+index c4a93d6..4d2a9b4 100644
+--- a/arch/s390/include/asm/exec.h
++++ b/arch/s390/include/asm/exec.h
+@@ -7,6 +7,6 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 52d7c87..577d292 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
+       __range_ok((unsigned long)(addr), (size));      \
+ })
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok(addr, size)
+ /*
+@@ -337,6 +338,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+       might_fault();
++
++      if ((long)n < 0)
++              return n;
++
+       return __copy_to_user(to, from, n);
+ }
+@@ -360,10 +365,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-      unsigned int sz = __compiletime_object_size(to);
++      size_t sz = __compiletime_object_size(to);
+       might_fault();
+-      if (unlikely(sz != -1 && sz < n)) {
++
++      if ((long)n < 0)
++              return n;
++
++      if (unlikely(sz != (size_t)-1 && sz < n)) {
+               if (!__builtin_constant_p(n))
+                       copy_user_overflow(sz, n);
+               else
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index fbc0789..e7962a1 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -163,11 +163,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+       /* Increase core size by size of got & plt and set start
+          offsets for got and plt. */
+-      me->core_layout.size = ALIGN(me->core_layout.size, 4);
+-      me->arch.got_offset = me->core_layout.size;
+-      me->core_layout.size += me->arch.got_size;
+-      me->arch.plt_offset = me->core_layout.size;
+-      me->core_layout.size += me->arch.plt_size;
++      me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 4);
++      me->arch.got_offset = me->core_layout.size_rw;
++      me->core_layout.size_rw += me->arch.got_size;
++      me->arch.plt_offset = me->core_layout.size_rx;
++      me->core_layout.size_rx += me->arch.plt_size;
+       return 0;
+ }
+@@ -283,7 +283,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+               if (info->got_initialized == 0) {
+                       Elf_Addr *gotent;
+-                      gotent = me->core_layout.base + me->arch.got_offset +
++                      gotent = me->core_layout.base_rw + me->arch.got_offset +
+                               info->got_offset;
+                       *gotent = val;
+                       info->got_initialized = 1;
+@@ -306,7 +306,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+                       rc = apply_rela_bits(loc, val, 0, 64, 0);
+               else if (r_type == R_390_GOTENT ||
+                        r_type == R_390_GOTPLTENT) {
+-                      val += (Elf_Addr) me->core_layout.base - loc;
++                      val += (Elf_Addr) me->core_layout.base_rw - loc;
+                       rc = apply_rela_bits(loc, val, 1, 32, 1);
+               }
+               break;
+@@ -319,7 +319,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+       case R_390_PLTOFF64:    /* 16 bit offset from GOT to PLT. */
+               if (info->plt_initialized == 0) {
+                       unsigned int *ip;
+-                      ip = me->core_layout.base + me->arch.plt_offset +
++                      ip = me->core_layout.base_rx + me->arch.plt_offset +
+                               info->plt_offset;
+                       ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+                       ip[1] = 0x100a0004;
+@@ -338,7 +338,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+                              val - loc + 0xffffUL < 0x1ffffeUL) ||
+                             (r_type == R_390_PLT32DBL &&
+                              val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+-                              val = (Elf_Addr) me->core_layout.base +
++                              val = (Elf_Addr) me->core_layout.base_rx +
+                                       me->arch.plt_offset +
+                                       info->plt_offset;
+                       val += rela->r_addend - loc;
+@@ -360,7 +360,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+       case R_390_GOTOFF32:    /* 32 bit offset to GOT.  */
+       case R_390_GOTOFF64:    /* 64 bit offset to GOT. */
+               val = val + rela->r_addend -
+-                      ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
++                      ((Elf_Addr) me->core_layout.base_rw + me->arch.got_offset);
+               if (r_type == R_390_GOTOFF16)
+                       rc = apply_rela_bits(loc, val, 0, 16, 0);
+               else if (r_type == R_390_GOTOFF32)
+@@ -370,7 +370,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+               break;
+       case R_390_GOTPC:       /* 32 bit PC relative offset to GOT. */
+       case R_390_GOTPCDBL:    /* 32 bit PC rel. off. to GOT shifted by 1. */
+-              val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
++              val = (Elf_Addr) me->core_layout.base_rw + me->arch.got_offset +
+                       rela->r_addend - loc;
+               if (r_type == R_390_GOTPC)
+                       rc = apply_rela_bits(loc, val, 1, 32, 0);
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index bba4fa7..9c32b3c 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -217,13 +217,6 @@ unsigned long get_wchan(struct task_struct *p)
+       return 0;
+ }
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() & ~PAGE_MASK;
+-      return sp & ~0xf;
+-}
+-
+ static inline unsigned long brk_rnd(void)
+ {
+       return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index eb9df28..7b686ba 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -201,9 +201,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+ static unsigned long
+-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+-                        const unsigned long len, const unsigned long pgoff,
+-                        const unsigned long flags)
++s390_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
++                        unsigned long len, unsigned long pgoff,
++                        unsigned long flags)
+ {
+       struct mm_struct *mm = current->mm;
+       unsigned long area;
+@@ -230,6 +230,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+@@ -239,9 +243,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+        */
+       if (mmap_is_legacy()) {
+               mm->mmap_base = mmap_base_legacy(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = s390_get_unmapped_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+       }
+ }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT                4
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
+index f9f3cd5..58ff438 100644
+--- a/arch/score/include/asm/exec.h
++++ b/arch/score/include/asm/exec.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_SCORE_EXEC_H
+ #define _ASM_SCORE_EXEC_H
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+ #endif /* _ASM_SCORE_EXEC_H */
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index aae9480..93e40a4 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -114,8 +114,3 @@ unsigned long get_wchan(struct task_struct *task)
+       return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      return sp;
+-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index 6777177..d44b592 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int do_colour_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (flags & MAP_FIXED) {
+@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       if (filp || (flags & MAP_SHARED))
+               do_colour_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_colour_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+-      info.low_limit = TASK_UNMAPPED_BASE;
++      info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
+@@ -77,14 +81,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+-                        const unsigned long len, const unsigned long pgoff,
+-                        const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++                        unsigned long len, unsigned long pgoff,
++                        unsigned long flags)
+ {
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       int do_colour_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (flags & MAP_FIXED) {
+@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (filp || (flags & MAP_SHARED))
+               do_colour_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               if (do_colour_align)
+@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 59b0960..75a8bcb 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -39,6 +39,7 @@ config SPARC
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
+       select MODULES_USE_ELF_RELA
++      select HAVE_GCC_PLUGINS
+       select ODD_RT_SIGACTION
+       select OLD_SIGSUSPEND
+       select ARCH_HAS_SG_CHAIN
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index 24827a3..5dd45ac4 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -15,18 +15,38 @@
+ #define ATOMIC64_INIT(i)      { (i) }
+ #define atomic_read(v)                READ_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return READ_ONCE(v->counter);
++}
+ #define atomic64_read(v)      READ_ONCE((v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      return READ_ONCE(v->counter);
++}
+ #define atomic_set(v, i)      WRITE_ONCE(((v)->counter), (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      WRITE_ONCE(v->counter, i);
++}
+ #define atomic64_set(v, i)    WRITE_ONCE(((v)->counter), (i))
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++      WRITE_ONCE(v->counter, i);
++}
+-#define ATOMIC_OP(op)                                                 \
+-void atomic_##op(int, atomic_t *);                                    \
+-void atomic64_##op(long, atomic64_t *);
++#define __ATOMIC_OP(op, suffix)                                               \
++void atomic_##op##suffix(int, atomic##suffix##_t *);                  \
++void atomic64_##op##suffix(long, atomic64##suffix##_t *);
+-#define ATOMIC_OP_RETURN(op)                                          \
+-int atomic_##op##_return(int, atomic_t *);                            \
+-long atomic64_##op##_return(long, atomic64_t *);
++#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
++
++#define __ATOMIC_OP_RETURN(op, suffix)                                        \
++int atomic_##op##_return##suffix(int, atomic##suffix##_t *);          \
++long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
++
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
+ #define ATOMIC_FETCH_OP(op)                                           \
+ int atomic_fetch_##op(int, atomic_t *);                                       \
+@@ -47,13 +67,23 @@ ATOMIC_OPS(xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+ #define atomic_dec_return(v)   atomic_sub_return(1, v)
+ #define atomic64_dec_return(v) atomic64_sub_return(1, v)
+ #define atomic_inc_return(v)   atomic_add_return(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_return(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++      return atomic64_add_return_unchecked(1, v);
++}
+ /*
+  * atomic_inc_and_test - increment and test
+@@ -64,6 +94,10 @@ ATOMIC_OPS(xor)
+  * other cases.
+  */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+@@ -73,25 +107,60 @@ ATOMIC_OPS(xor)
+ #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++      atomic64_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++      atomic64_sub_unchecked(1, v);
++}
+ #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++      return xchg(&v->counter, new);
++}
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+-      int c, old;
++      int c, old, new;
+       c = atomic_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic_cmpxchg((v), c, c + (a));
++
++              asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "tvs %%icc, 6\n"
++#endif
++
++                           : "=r" (new)
++                           : "0" (c), "ir" (a)
++                           : "cc");
++
++              old = atomic_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+@@ -101,21 +170,42 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic64_cmpxchg(v, o, n) \
+       ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++                                            long new)
++{
++      return cmpxchg(&(v->counter), old, new);
++}
++
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++      return xchg(&v->counter, new);
++}
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+-      long c, old;
++      long c, old, new;
+       c = atomic64_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic64_cmpxchg((v), c, c + (a));
++
++              asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++                           "tvs %%xcc, 6\n"
++#endif
++
++                           : "=r" (new)
++                           : "0" (c), "ir" (a)
++                           : "cc");
++
++              old = atomic64_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+-      return c != (u);
++      return c != u;
+ }
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 5bb6991..5c2132e 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
++#include <linux/const.h>
++
+ #define ARCH_SLAB_MINALIGN    __alignof__(unsigned long long)
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index a24e41f..47677ff 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE)
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   0x10000UL
++
++#define PAX_DELTA_MMAP_LEN    16
++#define PAX_DELTA_STACK_LEN   16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this cpu supports.  This can NOT be done in userspace
+    on Sparc.  */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 9331083..59c0499 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -190,6 +190,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE               0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE        0x0000000070000000UL
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE   (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN    (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN   (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP     sparc64_elf_hwcap
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index 0346c7e..c5c25b9 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+ }
+ #define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD)      pgd_populate((MM), (PGD), (PMD))
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
+                                  unsigned long address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 3529f13..d98a28c 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+ }
+ #define pgd_populate(MM, PGD, PUD)    __pgd_populate(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PMD)     pgd_populate((MM), (PGD), (PMD))
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+ }
+ #define pud_populate(MM, PUD, PMD)    __pud_populate(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD)     pud_populate((MM), (PUD), (PMD))
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
+index 59ba6f6..4518128 100644
+--- a/arch/sparc/include/asm/pgtable.h
++++ b/arch/sparc/include/asm/pgtable.h
+@@ -5,4 +5,8 @@
+ #else
+ #include <asm/pgtable_32.h>
+ #endif
++
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ #endif
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index ce6f569..593b043 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
+ #define PAGE_SHARED   SRMMU_PAGE_SHARED
+ #define PAGE_COPY     SRMMU_PAGE_COPY
+ #define PAGE_READONLY SRMMU_PAGE_RDONLY
++#define PAGE_SHARED_NOEXEC    SRMMU_PAGE_SHARED_NOEXEC
++#define PAGE_COPY_NOEXEC      SRMMU_PAGE_COPY_NOEXEC
++#define PAGE_READONLY_NOEXEC  SRMMU_PAGE_RDONLY_NOEXEC
+ #define PAGE_KERNEL   SRMMU_PAGE_KERNEL
+ /* Top-level page directory - dummy used by init-mm.
+@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
+ /*         xwr */
+ #define __P000  PAGE_NONE
+-#define __P001  PAGE_READONLY
+-#define __P010  PAGE_COPY
+-#define __P011  PAGE_COPY
++#define __P001  PAGE_READONLY_NOEXEC
++#define __P010  PAGE_COPY_NOEXEC
++#define __P011  PAGE_COPY_NOEXEC
+ #define __P100  PAGE_READONLY
+ #define __P101  PAGE_READONLY
+ #define __P110  PAGE_COPY
+ #define __P111  PAGE_COPY
+ #define __S000        PAGE_NONE
+-#define __S001        PAGE_READONLY
+-#define __S010        PAGE_SHARED
+-#define __S011        PAGE_SHARED
++#define __S001        PAGE_READONLY_NOEXEC
++#define __S010        PAGE_SHARED_NOEXEC
++#define __S011        PAGE_SHARED_NOEXEC
+ #define __S100        PAGE_READONLY
+ #define __S101        PAGE_READONLY
+ #define __S110        PAGE_SHARED
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index ae51a11..eadfd03 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -111,6 +111,11 @@
+                                   SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+                                   SRMMU_EXEC | SRMMU_REF)
++
++#define SRMMU_PAGE_SHARED_NOEXEC      __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC                __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC      __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++
+ #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+                                   SRMMU_DIRTY | SRMMU_REF)
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 29d64b1..4272fe8 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+ void handle_ld_nf(u32 insn, struct pt_regs *regs);
+ /* init_64.c */
+-extern atomic_t dcpage_flushes;
+-extern atomic_t dcpage_flushes_xcall;
++extern atomic_unchecked_t dcpage_flushes;
++extern atomic_unchecked_t dcpage_flushes_xcall;
+ extern int sysctl_tsb_ratio;
+ #endif
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 87990b7..352fff0 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -96,14 +96,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+       unsigned long tmp1, tmp2;
+       __asm__ __volatile__ (
+ "1:   ldsw            [%2], %0\n"
+ "     brlz,pn         %0, 2f\n"
+-"4:    add            %0, 1, %1\n"
++"4:    addcc          %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     tvs             %%icc, 6\n"
++#endif
++
+ "     cas             [%2], %0, %1\n"
+ "     cmp             %0, %1\n"
+ "     bne,pn          %%icc, 1b\n"
+@@ -116,10 +121,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+ "     .previous"
+       : "=&r" (tmp1), "=&r" (tmp2)
+       : "r" (lock)
+-      : "memory");
++      : "memory", "cc");
+ }
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+       int tmp1, tmp2;
+@@ -127,7 +132,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ "1:   ldsw            [%2], %0\n"
+ "     brlz,a,pn       %0, 2f\n"
+ "      mov            0, %0\n"
+-"     add             %0, 1, %1\n"
++"     addcc           %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     tvs             %%icc, 6\n"
++#endif
++
+ "     cas             [%2], %0, %1\n"
+ "     cmp             %0, %1\n"
+ "     bne,pn          %%icc, 1b\n"
+@@ -140,13 +150,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+       return tmp1;
+ }
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+       unsigned long tmp1, tmp2;
+       __asm__ __volatile__(
+ "1:   lduw    [%2], %0\n"
+-"     sub     %0, 1, %1\n"
++"     subcc   %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"     tvs     %%icc, 6\n"
++#endif
++
+ "     cas     [%2], %0, %1\n"
+ "     cmp     %0, %1\n"
+ "     bne,pn  %%xcc, 1b\n"
+@@ -156,7 +171,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
+       : "memory");
+ }
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+       unsigned long mask, tmp1, tmp2;
+@@ -181,7 +196,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
+       : "memory");
+ }
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+       __asm__ __volatile__(
+ "     stw             %%g0, [%0]"
+@@ -190,7 +205,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
+       : "memory");
+ }
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+       unsigned long mask, tmp1, tmp2, result;
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index 229475f..2fca9163 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -48,6 +48,7 @@ struct thread_info {
+       struct reg_window32     reg_window[NSWINS];     /* align for ldd! */
+       unsigned long           rwbuf_stkptrs[NSWINS];
+       unsigned long           w_saved;
++      unsigned long           lowest_stack;
+ };
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index 3d7b925..493ce82 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -59,6 +59,8 @@ struct thread_info {
+       struct pt_regs          *kern_una_regs;
+       unsigned int            kern_una_insn;
++      unsigned long           lowest_stack;
++
+       unsigned long           fpregs[(7 * 256) / sizeof(unsigned long)]
+               __attribute__ ((aligned(64)));
+ };
+@@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define TIF_NEED_RESCHED      3       /* rescheduling necessary */
+ /* flag bit 4 is available */
+ #define TIF_UNALIGNED         5       /* allowed to do unaligned accesses */
+-/* flag bit 6 is available */
++#define TIF_GRSEC_SETXID      6       /* update credentials on syscall entry/exit */
+ #define TIF_32BIT             7       /* 32-bit binary */
+ #define TIF_NOHZ              8       /* in adaptive nohz mode */
+ #define TIF_SECCOMP           9       /* secure computing */
+ #define TIF_SYSCALL_AUDIT     10      /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT        11      /* syscall tracepoint instrumentation */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+  *       in using in assembly, else we can't use the mask as
+  *       an immediate value in instructions such as andcc.
+@@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_AUDIT    (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_POLLING_NRFLAG   (1<<TIF_POLLING_NRFLAG)
++#define _TIF_GRSEC_SETXID     (1<<TIF_GRSEC_SETXID)
+ #define _TIF_USER_WORK_MASK   ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+                                _TIF_DO_NOTIFY_RESUME_MASK | \
+                                _TIF_NEED_RESCHED)
+ #define _TIF_DO_NOTIFY_RESUME_MASK    (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
++#define _TIF_WORK_SYSCALL             \
++      (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++       _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
++
+ #define is_32bit_task()       (test_thread_flag(TIF_32BIT))
+ /*
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index bd56c28..4b63d83 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index ea55f86..dbf15cf 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -47,6 +47,7 @@
+ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+       ({ (void)(type); __access_ok((unsigned long)(addr), size); })
+@@ -248,6 +249,9 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (n && __access_ok((unsigned long) to, n)) {
+               check_object_size(from, n, true);
+               return __copy_user(to, (__force void __user *) from, n);
+@@ -257,12 +261,18 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       check_object_size(from, n, true);
+       return __copy_user(to, (__force void __user *) from, n);
+ }
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       if (n && __access_ok((unsigned long) from, n)) {
+               check_object_size(to, n, false);
+               return __copy_user((__force void __user *) to, from, n);
+@@ -274,6 +284,9 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       return __copy_user((__force void __user *) to, from, n);
+ }
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 37a315d..75ce910 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/spitfire.h>
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -76,6 +77,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
+       return 1;
+ }
++static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
++{
++      return 1;
++}
++
+ static inline int access_ok(int type, const void __user * addr, unsigned long size)
+ {
+       return 1;
+@@ -212,6 +218,9 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+       unsigned long ret;
++      if ((long)size < 0 || size > INT_MAX)
++              return size;
++
+       check_object_size(to, size, false);
+       ret = ___copy_from_user(to, from, size);
+@@ -232,6 +241,9 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+       unsigned long ret;
++      if ((long)size < 0 || size > INT_MAX)
++              return size;
++
+       check_object_size(from, size, true);
+       ret = ___copy_to_user(to, from, size);
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index fdb1332..1b10f89 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -4,7 +4,7 @@
+ #
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+ extra-y     := head_$(BITS).o
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index b7780a5..28315f0 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
+         printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
+              r->psr, r->pc, r->npc, r->y, print_tainted());
+-      printk("PC: <%pS>\n", (void *) r->pc);
++      printk("PC: <%pA>\n", (void *) r->pc);
+       printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
+              r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+              r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+       printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
+              r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+              r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+-      printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++      printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+       printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
+              rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+               rw = (struct reg_window32 *) fp;
+               pc = rw->ins[7];
+               printk("[%08lx : ", pc);
+-              printk("%pS ] ", (void *) pc);
++              printk("%pA ] ", (void *) pc);
+               fp = rw->ins[6];
+       } while (++count < 16);
+       printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index fa14402..b2a7408 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
+       printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+              rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+       if (regs->tstate & TSTATE_PRIV)
+-              printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++              printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+ void show_regs(struct pt_regs *regs)
+@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
+       printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
+              regs->tpc, regs->tnpc, regs->y, print_tainted());
+-      printk("TPC: <%pS>\n", (void *) regs->tpc);
++      printk("TPC: <%pA>\n", (void *) regs->tpc);
+       printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+              regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+              regs->u_regs[3]);
+@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
+       printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+              regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+              regs->u_regs[15]);
+-      printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++      printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+       show_regwindow(regs);
+       show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
+                      ((tp && tp->task) ? tp->task->pid : -1));
+               if (gp->tstate & TSTATE_PRIV) {
+-                      printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++                      printk("             TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+                              (void *) gp->tpc,
+                              (void *) gp->o7,
+                              (void *) gp->i7,
+diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
+index 79cc0d1..46d6233 100644
+--- a/arch/sparc/kernel/prom_common.c
++++ b/arch/sparc/kernel/prom_common.c
+@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
+ unsigned int prom_early_allocated __initdata;
+-static struct of_pdt_ops prom_sparc_ops __initdata = {
++static const struct of_pdt_ops prom_sparc_ops __initconst = {
+       .nextprop = prom_common_nextprop,
+       .getproplen = prom_getproplen,
+       .getproperty = prom_getproperty,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 9ddc492..27a5619 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
+       return ret;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+       int ret = 0;
+@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+       if (test_thread_flag(TIF_NOHZ))
+               user_exit();
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               ret = tracehook_report_syscall_entry(regs);
+@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+       if (test_thread_flag(TIF_NOHZ))
+               user_exit();
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       audit_syscall_exit(regs);
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index d3035ba..40683bd 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -891,7 +891,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+               return;
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-      atomic_inc(&dcpage_flushes);
++      atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+       this_cpu = get_cpu();
+@@ -915,7 +915,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+                       xcall_deliver(data0, __pa(pg_addr),
+                                     (u64) pg_addr, cpumask_of(cpu));
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-                      atomic_inc(&dcpage_flushes_xcall);
++                      atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+               }
+       }
+@@ -934,7 +934,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+       preempt_disable();
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-      atomic_inc(&dcpage_flushes);
++      atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+       data0 = 0;
+       pg_addr = page_address(page);
+@@ -951,7 +951,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+               xcall_deliver(data0, __pa(pg_addr),
+                             (u64) pg_addr, cpu_online_mask);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-              atomic_inc(&dcpage_flushes_xcall);
++              atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+       }
+       __local_flush_dcache_page(page);
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 646988d..b88905f 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       if (len > TASK_SIZE - PAGE_SIZE)
+               return -ENOMEM;
+       if (!addr)
+-              addr = TASK_UNMAPPED_BASE;
++              addr = current->mm->mmap_base;
+       info.flags = 0;
+       info.length = len;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index fe8b8ee..3f17a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       struct vm_area_struct * vma;
+       unsigned long task_size = TASK_SIZE;
+       int do_color_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       if (flags & MAP_FIXED) {
+               /* We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+-              if ((flags & MAP_SHARED) &&
++              if ((filp || (flags & MAP_SHARED)) &&
+                   ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+                       return -EINVAL;
+               return addr;
+@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+       if (filp || (flags & MAP_SHARED))
+               do_color_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               if (do_color_align)
+                       addr = COLOR_ALIGN(addr, pgoff);
+@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (task_size - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+-      info.low_limit = TASK_UNMAPPED_BASE;
++      info.low_limit = mm->mmap_base;
+       info.high_limit = min(task_size, VA_EXCLUDE_START);
+       info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+               VM_BUG_ON(addr != -ENOMEM);
+               info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = task_size;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -141,15 +152,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ }
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+-                        const unsigned long len, const unsigned long pgoff,
+-                        const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++                        unsigned long len, unsigned long pgoff,
++                        unsigned long flags)
+ {
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long task_size = STACK_TOP32;
+       unsigned long addr = addr0;
+       int do_color_align;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       struct vm_unmapped_area_info info;
+       /* This should only ever run for 32-bit processes.  */
+@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               /* We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+-              if ((flags & MAP_SHARED) &&
++              if ((filp || (flags & MAP_SHARED)) &&
+                   ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+                       return -EINVAL;
+               return addr;
+@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (filp || (flags & MAP_SHARED))
+               do_color_align = 1;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               if (do_color_align)
+@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (task_size - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = STACK_TOP32;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
+ EXPORT_SYMBOL(get_fb_unmapped_area);
+ /* Essentially the same as PowerPC.  */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
+ {
+       unsigned long rnd = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (current->flags & PF_RANDOMIZE) {
+               unsigned long val = get_random_long();
+               if (test_thread_flag(TIF_32BIT))
+@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+-      unsigned long random_factor = mmap_rnd();
++      unsigned long random_factor = mmap_rnd(mm);
+       unsigned long gap;
+       /*
+@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+           gap == RLIM_INFINITY ||
+           sysctl_legacy_va_layout) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base += mm->delta_mmap;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+               /* We know it's 32-bit */
+@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+                       gap = (task_size / 6 * 5);
+               mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index c4a1b5c..c5e0ef3 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
+ #endif
+       .align  32
+ 1:    ldx     [%g6 + TI_FLAGS], %l5
+-      andcc   %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++      andcc   %l5, _TIF_WORK_SYSCALL, %g0
+       be,pt   %icc, rtrap
+        nop
+       call    syscall_trace_leave
+@@ -230,7 +230,7 @@ linux_sparc_syscall32:
+       srl     %i3, 0, %o3                             ! IEU0
+       srl     %i2, 0, %o2                             ! IEU0  Group
+-      andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++      andcc   %l0, _TIF_WORK_SYSCALL, %g0
+       bne,pn  %icc, linux_syscall_trace32             ! CTI
+        mov    %i0, %l5                                ! IEU1
+ 5:    call    %l7                                     ! CTI   Group brk forced
+@@ -254,7 +254,7 @@ linux_sparc_syscall:
+       mov     %i3, %o3                                ! IEU1
+       mov     %i4, %o4                                ! IEU0  Group
+-      andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++      andcc   %l0, _TIF_WORK_SYSCALL, %g0
+       bne,pn  %icc, linux_syscall_trace               ! CTI   Group
+        mov    %i0, %l5                                ! IEU0
+ 2:    call    %l7                                     ! CTI   Group brk forced
+@@ -269,7 +269,7 @@ ret_sys_call:
+       cmp     %o0, -ERESTART_RESTARTBLOCK
+       bgeu,pn %xcc, 1f
+-       andcc  %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++       andcc  %l0, _TIF_WORK_SYSCALL, %g0
+       ldx     [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ 2:
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 4f21df7..0a374da 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+       static int die_counter;
+@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+                     count++ < 30                              &&
+                       (((unsigned long) rw) >= PAGE_OFFSET)   &&
+                     !(((unsigned long) rw) & 0x7)) {
+-                      printk("Caller[%08lx]: %pS\n", rw->ins[7],
++                      printk("Caller[%08lx]: %pA\n", rw->ins[7],
+                              (void *) rw->ins[7]);
+                       rw = (struct reg_window32 *)rw->ins[6];
+               }
+       }
+       printk("Instruction DUMP:");
+       instruction_dump ((unsigned long *) regs->pc);
+-      if(regs->psr & PSR_PS)
++      if(regs->psr & PSR_PS) {
++              gr_handle_kernel_exploit();
+               do_exit(SIGKILL);
++      }
+       do_exit(SIGSEGV);
+ }
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index d21cd62..4e2ca86 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+                      i + 1,
+                      p->trapstack[i].tstate, p->trapstack[i].tpc,
+                      p->trapstack[i].tnpc, p->trapstack[i].tt);
+-              printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++              printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+       }
+ }
+@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+       lvl -= 0x100;
+       if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++              if (lvl == 6)
++                      pax_report_refcount_error(regs, NULL);
++#endif
++
+               sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+               die_if_kernel(buffer, regs);
+       }
+@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+       char buffer[32];
+-      
++
+       if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+                      0, lvl, SIGTRAP) == NOTIFY_STOP)
+               return;
++#ifdef CONFIG_PAX_REFCOUNT
++      if (lvl == 6)
++              pax_report_refcount_error(regs, NULL);
++#endif
++
+       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+       sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+              regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+       printk("%s" "ERROR(%d): ",
+              (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+-      printk("TPC<%pS>\n", (void *) regs->tpc);
++      printk("TPC<%pA>\n", (void *) regs->tpc);
+       printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
+              (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+              (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+                      smp_processor_id(),
+                      (type & 0x1) ? 'I' : 'D',
+                      regs->tpc);
+-              printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++              printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+               panic("Irrecoverable Cheetah+ parity error.");
+       }
+@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+              smp_processor_id(),
+              (type & 0x1) ? 'I' : 'D',
+              regs->tpc);
+-      printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++      printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+ struct sun4v_error_entry {
+@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
+ /*0x38*/u64           reserved_5;
+ };
+-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+ static const char *sun4v_err_type_to_str(u8 type)
+ {
+@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
+ }
+ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+-                          int cpu, const char *pfx, atomic_t *ocnt)
++                          int cpu, const char *pfx, atomic_unchecked_t *ocnt)
+ {
+       u64 *raw_ptr = (u64 *) ent;
+       u32 attrs;
+@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+       show_regs(regs);
+-      if ((cnt = atomic_read(ocnt)) != 0) {
+-              atomic_set(ocnt, 0);
++      if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
++              atomic_set_unchecked(ocnt, 0);
+               wmb();
+               printk("%s: Queue overflowed %d times.\n",
+                      pfx, cnt);
+@@ -2048,7 +2059,7 @@ out:
+  */
+ void sun4v_resum_overflow(struct pt_regs *regs)
+ {
+-      atomic_inc(&sun4v_resum_oflow_cnt);
++      atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
+ }
+ /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+       /* XXX Actually even this can make not that much sense.  Perhaps
+        * XXX we should just pull the plug and panic directly from here?
+        */
+-      atomic_inc(&sun4v_nonresum_oflow_cnt);
++      atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
+ }
+ static void sun4v_tlb_error(struct pt_regs *regs)
+@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+       printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+-      printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++      printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+-      printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++      printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+              (void *) regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+       printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+-      printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++      printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+-      printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++      printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+              (void *) regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+                       fp = (unsigned long)sf->fp + STACK_BIAS;
+               }
+-              printk(" [%016lx] %pS\n", pc, (void *) pc);
++              printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+               if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+                       int index = tsk->curr_ret_stack;
+                       if (tsk->ret_stack && index >= graph) {
+                               pc = tsk->ret_stack[index - graph].ret;
+-                              printk(" [%016lx] %pS\n", pc, (void *) pc);
++                              printk(" [%016lx] %pA\n", pc, (void *) pc);
+                               graph++;
+                       }
+               }
+@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+       return (struct reg_window *) (fp + STACK_BIAS);
+ }
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+       static int die_counter;
+@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+               while (rw &&
+                      count++ < 30 &&
+                      kstack_valid(tp, (unsigned long) rw)) {
+-                      printk("Caller[%016lx]: %pS\n", rw->ins[7],
++                      printk("Caller[%016lx]: %pA\n", rw->ins[7],
+                              (void *) rw->ins[7]);
+                       rw = kernel_stack_up(rw);
+@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+       }
+       if (panic_on_oops)
+               panic("Fatal exception");
+-      if (regs->tstate & TSTATE_PRIV)
++      if (regs->tstate & TSTATE_PRIV) {
++              gr_handle_kernel_exploit();
+               do_exit(SIGKILL);
++      }
+       do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 9aacb91..6415c82 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
+       static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+       if (__ratelimit(&ratelimit)) {
+-              printk("Kernel unaligned access at TPC[%lx] %pS\n",
++              printk("Kernel unaligned access at TPC[%lx] %pA\n",
+                      regs->tpc, (void *) regs->tpc);
+       }
+ }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index 3269b02..64f5231 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+ lib-$(CONFIG_SPARC32) += ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index a5c5a02..b62dbfec 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -16,11 +16,22 @@
+        * barriers.
+        */
+-#define ATOMIC_OP(op)                                                 \
+-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */            \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##cc
++#define __OVERFLOW_IOP tvs    %icc, 6;
++#define __OVERFLOW_XOP tvs    %xcc, 6;
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_IOP
++#define __OVERFLOW_XOP
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, post_op)                      \
++ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */    \
+       BACKOFF_SETUP(%o2);                                             \
+ 1:    lduw    [%o1], %g1;                                             \
+-      op      %g1, %o0, %g7;                                          \
++      asm_op  %g1, %o0, %g7;                                          \
++      post_op                                                         \
+       cas     [%o1], %g1, %g7;                                        \
+       cmp     %g1, %g7;                                               \
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                            \
+@@ -30,11 +41,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */         \
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b);                                     \
+ ENDPROC(atomic_##op);                                                 \
+-#define ATOMIC_OP_RETURN(op)                                          \
+-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */   \
++#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
++                    __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op)                       \
++ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+       BACKOFF_SETUP(%o2);                                             \
+ 1:    lduw    [%o1], %g1;                                             \
+-      op      %g1, %o0, %g7;                                          \
++      asm_op  %g1, %o0, %g7;                                          \
++      post_op                                                         \
+       cas     [%o1], %g1, %g7;                                        \
+       cmp     %g1, %g7;                                               \
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                            \
+@@ -44,6 +59,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */  \
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b);                                     \
+ ENDPROC(atomic_##op##_return);
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
++                           __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
++
+ #define ATOMIC_FETCH_OP(op)                                           \
+ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */      \
+       BACKOFF_SETUP(%o2);                                             \
+@@ -73,13 +91,16 @@ ATOMIC_OPS(xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+-#define ATOMIC64_OP(op)                                                       \
+-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */          \
++#define __ATOMIC64_OP(op, suffix, asm_op, post_op)                    \
++ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */  \
+       BACKOFF_SETUP(%o2);                                             \
+ 1:    ldx     [%o1], %g1;                                             \
+-      op      %g1, %o0, %g7;                                          \
++      asm_op  %g1, %o0, %g7;                                          \
++      post_op                                                         \
+       casx    [%o1], %g1, %g7;                                        \
+       cmp     %g1, %g7;                                               \
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                            \
+@@ -89,11 +110,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */              \
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b);                                     \
+ ENDPROC(atomic64_##op);                                                       \
+-#define ATOMIC64_OP_RETURN(op)                                                \
+-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
++                      __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op)             \
++ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+       BACKOFF_SETUP(%o2);                                             \
+ 1:    ldx     [%o1], %g1;                                             \
+-      op      %g1, %o0, %g7;                                          \
++      asm_op  %g1, %o0, %g7;                                          \
++      post_op                                                         \
+       casx    [%o1], %g1, %g7;                                        \
+       cmp     %g1, %g7;                                               \
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                            \
+@@ -103,6 +128,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */      \
+ 2:    BACKOFF_SPIN(%o2, %o3, 1b);                                     \
+ ENDPROC(atomic64_##op##_return);
++#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
++                             __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
++
+ #define ATOMIC64_FETCH_OP(op)                                         \
+ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */    \
+       BACKOFF_SETUP(%o2);                                             \
+@@ -132,7 +160,12 @@ ATOMIC64_OPS(xor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_XOP
++#undef __OVERFLOW_IOP
++#undef __REFCOUNT_OP
+ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index de5e978..cf48854 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
+ /* Atomic counter implementation. */
+ #define ATOMIC_OP(op)                                                 \
+ EXPORT_SYMBOL(atomic_##op);                                           \
+-EXPORT_SYMBOL(atomic64_##op);
++EXPORT_SYMBOL(atomic_##op##_unchecked);                                       \
++EXPORT_SYMBOL(atomic64_##op);                                         \
++EXPORT_SYMBOL(atomic64_##op##_unchecked);
+ #define ATOMIC_OP_RETURN(op)                                          \
+ EXPORT_SYMBOL(atomic_##op##_return);                                  \
+@@ -114,6 +116,8 @@ EXPORT_SYMBOL(atomic64_fetch_##op);
+ #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
+ ATOMIC_OPS(add)
++EXPORT_SYMBOL(atomic_add_return_unchecked);
++EXPORT_SYMBOL(atomic64_add_return_unchecked);
+ ATOMIC_OPS(sub)
+ #undef ATOMIC_OPS
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 30c3ecc..736f015 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
+ obj-y                   += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 4714061..bad7f9a 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -22,6 +22,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+       return safe_compute_effective_address(regs, insn);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++      vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      unsigned int *kaddr;
++
++      vmf->page = alloc_page(GFP_HIGHUSER);
++      if (!vmf->page)
++              return VM_FAULT_OOM;
++
++      kaddr = kmap(vmf->page);
++      memset(kaddr, 0, PAGE_SIZE);
++      kaddr[0] = 0x9DE3BFA8U; /* save */
++      flush_dcache_page(vmf->page);
++      kunmap(vmf->page);
++      return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++      .close = pax_emuplt_close,
++      .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++      int ret;
++
++      INIT_LIST_HEAD(&vma->anon_vma_chain);
++      vma->vm_mm = current->mm;
++      vma->vm_start = addr;
++      vma->vm_end = addr + PAGE_SIZE;
++      vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++      vma->vm_ops = &pax_vm_ops;
++
++      ret = insert_vm_struct(current->mm, vma);
++      if (ret)
++              return ret;
++
++      ++current->mm->total_vm;
++      return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: patched PLT emulation #1 */
++              unsigned int sethi1, sethi2, jmpl;
++
++              err = get_user(sethi1, (unsigned int *)regs->pc);
++              err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++              err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x03000000U &&
++                  (jmpl & 0xFFFFE000U) == 0x81C06000U)
++              {
++                      unsigned int addr;
++
++                      regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++                      addr = regs->u_regs[UREG_G1];
++                      addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++                      regs->pc = addr;
++                      regs->npc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #2 */
++              unsigned int ba;
++
++              err = get_user(ba, (unsigned int *)regs->pc);
++
++              if (err)
++                      break;
++
++              if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++                      unsigned int addr;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++                      else
++                              addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++                      regs->pc = addr;
++                      regs->npc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #3 */
++              unsigned int sethi, bajmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->pc);
++              err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++              err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned int addr;
++
++                      addr = (sethi & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] = addr;
++                      if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++                              addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++                      else
++                              addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++                      regs->pc = addr;
++                      regs->npc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation step 1 */
++              unsigned int sethi, ba, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->pc);
++              err |= get_user(ba, (unsigned int *)(regs->pc+4));
++              err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned int addr, save, call;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++                      else
++                              addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++                      err = get_user(save, (unsigned int *)addr);
++                      err |= get_user(call, (unsigned int *)(addr+4));
++                      err |= get_user(nop, (unsigned int *)(addr+8));
++                      if (err)
++                              break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++                      if (save == 0x9DE3BFA8U &&
++                          (call & 0xC0000000U) == 0x40000000U &&
++                          nop == 0x01000000U)
++                      {
++                              struct vm_area_struct *vma;
++                              unsigned long call_dl_resolve;
++
++                              down_read(&current->mm->mmap_sem);
++                              call_dl_resolve = current->mm->call_dl_resolve;
++                              up_read(&current->mm->mmap_sem);
++                              if (likely(call_dl_resolve))
++                                      goto emulate;
++
++                              vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++                              down_write(&current->mm->mmap_sem);
++                              if (current->mm->call_dl_resolve) {
++                                      call_dl_resolve = current->mm->call_dl_resolve;
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      goto emulate;
++                              }
++
++                              call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++                              if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              if (pax_insert_vma(vma, call_dl_resolve)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              current->mm->call_dl_resolve = call_dl_resolve;
++                              up_write(&current->mm->mmap_sem);
++
++emulate:
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->pc = call_dl_resolve;
++                              regs->npc = addr+4;
++                              return 3;
++                      }
++#endif
++
++                      /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++                      if ((save & 0xFFC00000U) == 0x05000000U &&
++                          (call & 0xFFFFE000U) == 0x85C0A000U &&
++                          nop == 0x01000000U)
++                      {
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->u_regs[UREG_G2] = addr + 4;
++                              addr = (save & 0x003FFFFFU) << 10;
++                              addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++                              regs->pc = addr;
++                              regs->npc = addr+4;
++                              return 3;
++                      }
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation step 2 */
++              unsigned int save, call, nop;
++
++              err = get_user(save, (unsigned int *)(regs->pc-4));
++              err |= get_user(call, (unsigned int *)regs->pc);
++              err |= get_user(nop, (unsigned int *)(regs->pc+4));
++              if (err)
++                      break;
++
++              if (save == 0x9DE3BFA8U &&
++                  (call & 0xC0000000U) == 0x40000000U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++                      regs->u_regs[UREG_RETPC] = regs->pc;
++                      regs->pc = dl_resolve;
++                      regs->npc = dl_resolve+4;
++                      return 3;
++              }
++      } while (0);
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 8; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+                                     int text_fault)
+ {
+@@ -226,6 +500,24 @@ good_area:
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++              if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++                      up_read(&mm->mmap_sem);
++                      switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++                      case 2:
++                      case 3:
++                              return;
++#endif
++
++                      }
++                      pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               /* Allow reads even for write-only mappings */
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       goto bad_area;
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 3f291d8..b335338 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -23,6 +23,9 @@
+ #include <linux/percpu.h>
+ #include <linux/context_tracking.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+       printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+              regs->tpc);
+       printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+-      printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++      printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+       printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+       dump_stack();
+       unhandled_fault(regs->tpc, current, regs);
+@@ -276,6 +279,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
+       show_regs(regs);
+ }
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++      vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      unsigned int *kaddr;
++
++      vmf->page = alloc_page(GFP_HIGHUSER);
++      if (!vmf->page)
++              return VM_FAULT_OOM;
++
++      kaddr = kmap(vmf->page);
++      memset(kaddr, 0, PAGE_SIZE);
++      kaddr[0] = 0x9DE3BFA8U; /* save */
++      flush_dcache_page(vmf->page);
++      kunmap(vmf->page);
++      return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++      .close = pax_emuplt_close,
++      .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++      int ret;
++
++      INIT_LIST_HEAD(&vma->anon_vma_chain);
++      vma->vm_mm = current->mm;
++      vma->vm_start = addr;
++      vma->vm_end = addr + PAGE_SIZE;
++      vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++      vma->vm_ops = &pax_vm_ops;
++
++      ret = insert_vm_struct(current->mm, vma);
++      if (ret)
++              return ret;
++
++      ++current->mm->total_vm;
++      return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++      int err;
++
++      do { /* PaX: patched PLT emulation #1 */
++              unsigned int sethi1, sethi2, jmpl;
++
++              err = get_user(sethi1, (unsigned int *)regs->tpc);
++              err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++              err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x03000000U &&
++                  (jmpl & 0xFFFFE000U) == 0x81C06000U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++                      addr = regs->u_regs[UREG_G1];
++                      addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #2 */
++              unsigned int ba;
++
++              err = get_user(ba, (unsigned int *)regs->tpc);
++
++              if (err)
++                      break;
++
++              if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++                      unsigned long addr;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++                      else
++                              addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #3 */
++              unsigned int sethi, bajmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      addr = (sethi & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] = addr;
++                      if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++                              addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++                      else
++                              addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #4 */
++              unsigned int sethi, mov1, call, mov2;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++              err |= get_user(call, (unsigned int *)(regs->tpc+8));
++              err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  mov1 == 0x8210000FU &&
++                  (call & 0xC0000000U) == 0x40000000U &&
++                  mov2 == 0x9E100001U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++                      addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #5 */
++              unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++              err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++              err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++              err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++              err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++              err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  (sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++                  (or1 & 0xFFFFE000U) == 0x82106000U &&
++                  (or2 & 0xFFFFE000U) == 0x8A116000U &&
++                  sllx == 0x83287020U &&
++                  jmpl == 0x81C04005U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++                      regs->u_regs[UREG_G1] <<= 32;
++                      regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++                      addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: patched PLT emulation #6 */
++              unsigned int sethi, sethi1, sethi2, sllx, or,  jmpl, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++              err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++              err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++              err |= get_user(or, (unsigned int *)(regs->tpc+16));
++              err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  (sethi1 & 0xFFC00000U) == 0x03000000U &&
++                  (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++                  sllx == 0x83287020U &&
++                  (or & 0xFFFFE000U) == 0x8A116000U &&
++                  jmpl == 0x81C04005U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] <<= 32;
++                      regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++                      addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: unpatched PLT emulation step 1 */
++              unsigned int sethi, ba, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++                      unsigned int save, call;
++                      unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++                      if ((ba & 0xFFC00000U) == 0x30800000U)
++                              addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++                      else
++                              addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      err = get_user(save, (unsigned int *)addr);
++                      err |= get_user(call, (unsigned int *)(addr+4));
++                      err |= get_user(nop, (unsigned int *)(addr+8));
++                      if (err)
++                              break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++                      if (save == 0x9DE3BFA8U &&
++                          (call & 0xC0000000U) == 0x40000000U &&
++                          nop == 0x01000000U)
++                      {
++                              struct vm_area_struct *vma;
++                              unsigned long call_dl_resolve;
++
++                              down_read(&current->mm->mmap_sem);
++                              call_dl_resolve = current->mm->call_dl_resolve;
++                              up_read(&current->mm->mmap_sem);
++                              if (likely(call_dl_resolve))
++                                      goto emulate;
++
++                              vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++                              down_write(&current->mm->mmap_sem);
++                              if (current->mm->call_dl_resolve) {
++                                      call_dl_resolve = current->mm->call_dl_resolve;
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      goto emulate;
++                              }
++
++                              call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++                              if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      if (vma)
++                                              kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              if (pax_insert_vma(vma, call_dl_resolve)) {
++                                      up_write(&current->mm->mmap_sem);
++                                      kmem_cache_free(vm_area_cachep, vma);
++                                      return 1;
++                              }
++
++                              current->mm->call_dl_resolve = call_dl_resolve;
++                              up_write(&current->mm->mmap_sem);
++
++emulate:
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->tpc = call_dl_resolve;
++                              regs->tnpc = addr+4;
++                              return 3;
++                      }
++#endif
++
++                      /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++                      if ((save & 0xFFC00000U) == 0x05000000U &&
++                          (call & 0xFFFFE000U) == 0x85C0A000U &&
++                          nop == 0x01000000U)
++                      {
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->u_regs[UREG_G2] = addr + 4;
++                              addr = (save & 0x003FFFFFU) << 10;
++                              addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++                              if (test_thread_flag(TIF_32BIT))
++                                      addr &= 0xFFFFFFFFUL;
++
++                              regs->tpc = addr;
++                              regs->tnpc = addr+4;
++                              return 3;
++                      }
++
++                      /* PaX: 64-bit PLT stub */
++                      err = get_user(sethi1, (unsigned int *)addr);
++                      err |= get_user(sethi2, (unsigned int *)(addr+4));
++                      err |= get_user(or1, (unsigned int *)(addr+8));
++                      err |= get_user(or2, (unsigned int *)(addr+12));
++                      err |= get_user(sllx, (unsigned int *)(addr+16));
++                      err |= get_user(add, (unsigned int *)(addr+20));
++                      err |= get_user(jmpl, (unsigned int *)(addr+24));
++                      err |= get_user(nop, (unsigned int *)(addr+28));
++                      if (err)
++                              break;
++
++                      if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++                          (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++                          (or1 & 0xFFFFE000U) == 0x88112000U &&
++                          (or2 & 0xFFFFE000U) == 0x8A116000U &&
++                          sllx == 0x89293020U &&
++                          add == 0x8A010005U &&
++                          jmpl == 0x89C14000U &&
++                          nop == 0x01000000U)
++                      {
++                              regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++                              regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++                              regs->u_regs[UREG_G4] <<= 32;
++                              regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++                              regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++                              regs->u_regs[UREG_G4] = addr + 24;
++                              addr = regs->u_regs[UREG_G5];
++                              regs->tpc = addr;
++                              regs->tnpc = addr+4;
++                              return 3;
++                      }
++              }
++      } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++      do { /* PaX: unpatched PLT emulation step 2 */
++              unsigned int save, call, nop;
++
++              err = get_user(save, (unsigned int *)(regs->tpc-4));
++              err |= get_user(call, (unsigned int *)regs->tpc);
++              err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++              if (err)
++                      break;
++
++              if (save == 0x9DE3BFA8U &&
++                  (call & 0xC0000000U) == 0x40000000U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              dl_resolve &= 0xFFFFFFFFUL;
++
++                      regs->u_regs[UREG_RETPC] = regs->tpc;
++                      regs->tpc = dl_resolve;
++                      regs->tnpc = dl_resolve+4;
++                      return 3;
++              }
++      } while (0);
++#endif
++
++      do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++              unsigned int sethi, ba, nop;
++
++              err = get_user(sethi, (unsigned int *)regs->tpc);
++              err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++              err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++              if (err)
++                      break;
++
++              if ((sethi & 0xFFC00000U) == 0x03000000U &&
++                  (ba & 0xFFF00000U) == 0x30600000U &&
++                  nop == 0x01000000U)
++              {
++                      unsigned long addr;
++
++                      addr = (sethi & 0x003FFFFFU) << 10;
++                      regs->u_regs[UREG_G1] = addr;
++                      addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++                      if (test_thread_flag(TIF_32BIT))
++                              addr &= 0xFFFFFFFFUL;
++
++                      regs->tpc = addr;
++                      regs->tnpc = addr+4;
++                      return 2;
++              }
++      } while (0);
++
++#endif
++
++      return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      unsigned long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 8; i++) {
++              unsigned int c;
++              if (get_user(c, (unsigned int *)pc+i))
++                      printk(KERN_CONT "???????? ");
++              else
++                      printk(KERN_CONT "%08x ", c);
++      }
++      printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+       enum ctx_state prev_state = exception_enter();
+@@ -350,6 +813,29 @@ retry:
+       if (!vma)
+               goto bad_area;
++#ifdef CONFIG_PAX_PAGEEXEC
++      /* PaX: detect ITLB misses on non-exec pages */
++      if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++          !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++      {
++              if (address != regs->tpc)
++                      goto good_area;
++
++              up_read(&mm->mmap_sem);
++              switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++              case 2:
++              case 3:
++                      return;
++#endif
++
++              }
++              pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++              do_group_exit(SIGKILL);
++      }
++#endif
++
+       /* Pure DTLB misses do not tell us whether the fault causing
+        * load/store/atomic was a write or not, it only says that there
+        * was no match.  So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 988acc8b..f26345c 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -26,8 +26,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+                                                       unsigned long addr,
+                                                       unsigned long len,
+                                                       unsigned long pgoff,
+-                                                      unsigned long flags)
++                                                      unsigned long flags,
++                                                      unsigned long offset)
+ {
++      struct mm_struct *mm = current->mm;
+       unsigned long task_size = TASK_SIZE;
+       struct vm_unmapped_area_info info;
+@@ -36,15 +38,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+       info.flags = 0;
+       info.length = len;
+-      info.low_limit = TASK_UNMAPPED_BASE;
++      info.low_limit = mm->mmap_base;
+       info.high_limit = min(task_size, VA_EXCLUDE_START);
+       info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+               VM_BUG_ON(addr != -ENOMEM);
+               info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = task_size;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -53,10 +62,11 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ }
+ static unsigned long
+-hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+-                                const unsigned long len,
+-                                const unsigned long pgoff,
+-                                const unsigned long flags)
++hugetlb_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++                                unsigned long len,
++                                unsigned long pgoff,
++                                unsigned long flags,
++                                unsigned long offset)
+ {
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+@@ -71,6 +81,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.high_limit = mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -83,6 +94,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = STACK_TOP32;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -97,6 +114,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long task_size = TASK_SIZE;
++      unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+       if (test_thread_flag(TIF_32BIT))
+               task_size = STACK_TOP32;
+@@ -112,19 +130,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = ALIGN(addr, HPAGE_SIZE);
+               vma = find_vma(mm, addr);
+-              if (task_size - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       if (mm->get_unmapped_area == arch_get_unmapped_area)
+               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+       else
+               return hugetlb_get_unmapped_area_topdown(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+ }
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 7ac6b62..58e934c 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+ int num_kernel_image_mappings;
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-atomic_t dcpage_flushes = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
+ #ifdef CONFIG_SMP
+-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+ #endif
+ #endif
+@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
+ {
+       BUG_ON(tlb_type == hypervisor);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-      atomic_inc(&dcpage_flushes);
++      atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+ #ifdef DCACHE_ALIASING_POSSIBLE
+@@ -462,10 +462,10 @@ void mmu_info(struct seq_file *m)
+ #ifdef CONFIG_DEBUG_DCFLUSH
+       seq_printf(m, "DCPageFlushes\t: %d\n",
+-                 atomic_read(&dcpage_flushes));
++                 atomic_read_unchecked(&dcpage_flushes));
+ #ifdef CONFIG_SMP
+       seq_printf(m, "DCPageFlushesXC\t: %d\n",
+-                 atomic_read(&dcpage_flushes_xcall));
++                 atomic_read_unchecked(&dcpage_flushes_xcall));
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_DEBUG_DCFLUSH */
+ }
+diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
+index 78da75b..264302d 100644
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -193,6 +193,7 @@ source "kernel/Kconfig.hz"
+ config KEXEC
+       bool "kexec system call"
+       select KEXEC_CORE
++      depends on !GRKERNSEC_KMEM
+       ---help---
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
+index 4cefa0c..98d8b83 100644
+--- a/arch/tile/include/asm/atomic_64.h
++++ b/arch/tile/include/asm/atomic_64.h
+@@ -195,6 +195,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1, 0)
++#define atomic64_read_unchecked(v)            atomic64_read(v)
++#define atomic64_set_unchecked(v, i)          atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v)          atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v)   atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v)          atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v)             atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v)      atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v)             atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n)   atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _ASM_TILE_ATOMIC_64_H */
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index 6160761..00cac88 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
++#include <linux/const.h>
+ #include <arch/chip.h>
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT                CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT                CHIP_L2_LOG_LINE_SIZE()
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index a77369e..7ba6ecd 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -428,9 +428,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                         const void __user *from,
+                                         unsigned long n)
+ {
+-      int sz = __compiletime_object_size(to);
++      size_t sz = __compiletime_object_size(to);
+-      if (likely(sz == -1 || sz >= n))
++      if (likely(sz == (size_t)-1 || sz >= n))
+               n = _copy_from_user(to, from, n);
+       else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 77ceaa3..3630dea 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -174,6 +174,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = 0;
+       return vm_unmapped_area(&info);
+ }
+@@ -191,6 +192,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+       info.high_limit = current->mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = 0;
+       addr = vm_unmapped_area(&info);
+       /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 0ca46ede..8d7fd38 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -73,6 +73,8 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
+               -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+               -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
++USER_CFLAGS   := $(filter-out $(GCC_PLUGINS_CFLAGS),$(USER_CFLAGS))
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(ARCH_DIR)/Makefile-os-$(OS)
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
++#include <linux/const.h>
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT               (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT               5
+ #endif
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 2e0a6b1..a64d0f5 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -8,6 +8,6 @@
+ /* No more #include "asm/arch/kmap_types.h" ! */
+-#define KM_TYPE_NR 14
++#define KM_TYPE_NR 15
+ #endif
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index f878bec..ca09300 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE     (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK     (~(PAGE_SIZE-1))
++#define ktla_ktva(addr)                       (addr)
++#define ktva_ktla(addr)                       (addr)
++
+ #ifndef __ASSEMBLY__
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index bae8523..ba9484b 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x)        (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+       set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 034b42c7..5c186ce 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -343,22 +343,6 @@ int singlestepping(void * t)
+       return 2;
+ }
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/exec.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() % 8192;
+-      return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+       unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+-#define L1_CACHE_SHIFT                (5)
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT                5
++#define L1_CACHE_BYTES                (_AC(1,UL) << L1_CACHE_SHIFT)
+ /*
+  * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2a1f0ce..ca2cc51 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -39,14 +39,13 @@ config X86
+       select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_SUPPORTS_ATOMIC_RMW
+       select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+-      select ARCH_SUPPORTS_INT128             if X86_64
++      select ARCH_SUPPORTS_INT128             if X86_64 && !PAX_SIZE_OVERFLOW_EXTRA && !PAX_SIZE_OVERFLOW
+       select ARCH_SUPPORTS_NUMA_BALANCING     if X86_64
+       select ARCH_USE_BUILTIN_BSWAP
+       select ARCH_USE_CMPXCHG_LOCKREF         if X86_64
+       select ARCH_USE_QUEUED_RWLOCKS
+       select ARCH_USE_QUEUED_SPINLOCKS
+       select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+-      select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+       select ARCH_WANT_FRAME_POINTERS
+       select ARCH_WANT_IPC_PARSE_VERSION      if X86_32
+       select BUILDTIME_EXTABLE_SORT
+@@ -93,7 +92,7 @@ config X86
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_WITHIN_STACK_FRAMES
+       select HAVE_EBPF_JIT                    if X86_64
+-      select HAVE_CC_STACKPROTECTOR
++      select HAVE_CC_STACKPROTECTOR           if X86_64 || !PAX_MEMORY_UDEREF
+       select HAVE_CMPXCHG_DOUBLE
+       select HAVE_CMPXCHG_LOCAL
+       select HAVE_CONTEXT_TRACKING            if X86_64
+@@ -136,6 +135,7 @@ config X86
+       select HAVE_NMI
+       select HAVE_OPROFILE
+       select HAVE_OPTPROBES
++      select HAVE_PAX_INITIFY_INIT_EXIT if GCC_PLUGINS
+       select HAVE_PCSPKR_PLATFORM
+       select HAVE_PERF_EVENTS
+       select HAVE_PERF_EVENTS_NMI
+@@ -189,11 +189,13 @@ config MMU
+       def_bool y
+ config ARCH_MMAP_RND_BITS_MIN
+-      default 28 if 64BIT
++      default 28 if 64BIT && !PAX_PER_CPU_PGD
++      default 27 if 64BIT && PAX_PER_CPU_PGD
+       default 8
+ config ARCH_MMAP_RND_BITS_MAX
+-      default 32 if 64BIT
++      default 32 if 64BIT && !PAX_PER_CPU_PGD
++      default 27 if 64BIT && PAX_PER_CPU_PGD
+       default 16
+ config ARCH_MMAP_RND_COMPAT_BITS_MIN
+@@ -295,7 +297,7 @@ config X86_64_SMP
+ config X86_32_LAZY_GS
+       def_bool y
+-      depends on X86_32 && !CC_STACKPROTECTOR
++      depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+ config ARCH_SUPPORTS_UPROBES
+       def_bool y
+@@ -677,6 +679,7 @@ config SCHED_OMIT_FRAME_POINTER
+ menuconfig HYPERVISOR_GUEST
+       bool "Linux guest support"
++      depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
+       ---help---
+         Say Y here to enable options for running Linux under various hyper-
+         visors. This option enables basic hypervisor detection and platform
+@@ -1078,6 +1081,7 @@ config VM86
+ config X86_16BIT
+       bool "Enable support for 16-bit segments" if EXPERT
++      depends on !GRKERNSEC
+       default y
+       depends on MODIFY_LDT_SYSCALL
+       ---help---
+@@ -1232,6 +1236,7 @@ choice
+ config NOHIGHMEM
+       bool "off"
++      depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+       ---help---
+         Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+         However, the address space of 32-bit x86 processors is only 4
+@@ -1268,6 +1273,7 @@ config NOHIGHMEM
+ config HIGHMEM4G
+       bool "4GB"
++      depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+       ---help---
+         Select this if you have a 32-bit processor and between 1 and 4
+         gigabytes of physical RAM.
+@@ -1320,7 +1326,7 @@ config PAGE_OFFSET
+       hex
+       default 0xB0000000 if VMSPLIT_3G_OPT
+       default 0x80000000 if VMSPLIT_2G
+-      default 0x78000000 if VMSPLIT_2G_OPT
++      default 0x70000000 if VMSPLIT_2G_OPT
+       default 0x40000000 if VMSPLIT_1G
+       default 0xC0000000
+       depends on X86_32
+@@ -1341,7 +1347,6 @@ config X86_PAE
+ config ARCH_PHYS_ADDR_T_64BIT
+       def_bool y
+-      depends on X86_64 || X86_PAE
+ config ARCH_DMA_ADDR_T_64BIT
+       def_bool y
+@@ -1472,7 +1477,7 @@ config ARCH_PROC_KCORE_TEXT
+ config ILLEGAL_POINTER_VALUE
+        hex
+-       default 0 if X86_32
++       default 0xfffff000 if X86_32
+        default 0xdead000000000000 if X86_64
+ source "mm/Kconfig"
+@@ -1795,6 +1800,7 @@ source kernel/Kconfig.hz
+ config KEXEC
+       bool "kexec system call"
+       select KEXEC_CORE
++      depends on !GRKERNSEC_KMEM
+       ---help---
+         kexec is a system call that implements the ability to shutdown your
+         current kernel, and to start another kernel.  It is like a reboot
+@@ -1922,7 +1928,7 @@ config RELOCATABLE
+ config RANDOMIZE_BASE
+       bool "Randomize the address of the kernel image (KASLR)"
+-      depends on RELOCATABLE
++      depends on RELOCATABLE && BROKEN_SECURITY
+       default n
+       ---help---
+         In support of Kernel Address Space Layout Randomization (KASLR),
+@@ -1966,7 +1972,9 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+       hex "Alignment value to which kernel should be aligned"
+-      default "0x200000"
++      default "0x1000000"
++      range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++      range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
+       range 0x2000 0x1000000 if X86_32
+       range 0x200000 0x1000000 if X86_64
+       ---help---
+@@ -2081,6 +2089,7 @@ config COMPAT_VDSO
+       def_bool n
+       prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
+       depends on X86_32 || IA32_EMULATION
++      depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+       ---help---
+         Certain buggy versions of glibc will crash if they are
+         presented with a 32-bit vDSO that is not mapped at the address
+@@ -2121,15 +2130,6 @@ choice
+         If unsure, select "Emulate".
+-      config LEGACY_VSYSCALL_NATIVE
+-              bool "Native"
+-              help
+-                Actual executable code is located in the fixed vsyscall
+-                address mapping, implementing time() efficiently. Since
+-                this makes the mapping executable, it can be used during
+-                security vulnerability exploitation (traditionally as
+-                ROP gadgets). This configuration is not recommended.
+-
+       config LEGACY_VSYSCALL_EMULATE
+               bool "Emulate"
+               help
+@@ -2210,6 +2210,22 @@ config MODIFY_LDT_SYSCALL
+         Saying 'N' here may make sense for embedded or server kernels.
++config DEFAULT_MODIFY_LDT_SYSCALL
++      bool "Allow userspace to modify the LDT by default"
++      default y
++
++      ---help---
++        Modifying the LDT (Local Descriptor Table) may be needed to run a
++        16-bit or segmented code such as Dosemu or Wine. This is done via
++        a system call which is not needed to run portable applications,
++        and which can sometimes be abused to exploit some weaknesses of
++        the architecture, opening new vulnerabilities.
++
++        For this reason this option allows one to enable or disable the
++        feature at runtime. It is recommended to say 'N' here to leave
++        the system protected, and to enable it at runtime only if needed
++        by setting the sys.kernel.modify_ldt sysctl.
++
+ source "kernel/livepatch/Kconfig"
+ endmenu
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 3ba5ff2..44bdacc 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -329,7 +329,7 @@ config X86_PPRO_FENCE
+ config X86_F00F_BUG
+       def_bool y
+-      depends on M586MMX || M586TSC || M586 || M486
++      depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
+ config X86_INVD_BUG
+       def_bool y
+@@ -337,7 +337,7 @@ config X86_INVD_BUG
+ config X86_ALIGNMENT_16
+       def_bool y
+-      depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++      depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+ config X86_INTEL_USERCOPY
+       def_bool y
+@@ -379,7 +379,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+       def_bool y
+-      depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++      depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+ config X86_MINIMUM_CPU_FAMILY
+       int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index 67eec55..1a5c1ab 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -55,6 +55,7 @@ config X86_PTDUMP
+       tristate "Export kernel pagetable layout to userspace via debugfs"
+       depends on DEBUG_KERNEL
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       select X86_PTDUMP_CORE
+       ---help---
+         Say Y here if you want to show the kernel pagetable layout in a
+@@ -84,6 +85,7 @@ config DEBUG_RODATA_TEST
+ config DEBUG_WX
+       bool "Warn on W+X mappings at boot"
++      depends on BROKEN
+       select X86_PTDUMP_CORE
+       ---help---
+         Generate a warning if any W+X mappings are found at boot.
+@@ -111,7 +113,7 @@ config DEBUG_WX
+ config DEBUG_SET_MODULE_RONX
+       bool "Set loadable kernel module data as NX and text as RO"
+-      depends on MODULES
++      depends on MODULES && BROKEN
+       ---help---
+         This option helps catch unintended modifications to loadable
+         kernel module's text and read-only data. It also prevents execution
+@@ -353,6 +355,7 @@ config X86_DEBUG_FPU
+ config PUNIT_ATOM_DEBUG
+       tristate "ATOM Punit debug driver"
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       select IOSF_MBI
+       ---help---
+         This is a debug driver, which gets the power states
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 830ed39..56602a5 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -75,9 +75,6 @@ ifeq ($(CONFIG_X86_32),y)
+         # CPU-specific tuning. Anything which can be shared with UML should go here.
+         include arch/x86/Makefile_32.cpu
+         KBUILD_CFLAGS += $(cflags-y)
+-
+-        # temporary until string.h is fixed
+-        KBUILD_CFLAGS += -ffreestanding
+ else
+         BITS := 64
+         UTS_MACHINE := x86_64
+@@ -126,6 +123,9 @@ else
+         KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
+ endif
++# temporary until string.h is fixed
++KBUILD_CFLAGS += -ffreestanding
++
+ ifdef CONFIG_X86_X32
+       x32_ld_ok := $(call try-run,\
+                       /bin/echo -e '1: .quad 1b' | \
+@@ -191,6 +191,7 @@ archheaders:
+       $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
+ archprepare:
++      $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+ ifeq ($(CONFIG_KEXEC_FILE),y)
+       $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
+@@ -277,3 +278,9 @@ define archhelp
+   echo  '                  FDARGS="..."  arguments for the booted kernel'
+   echo  '                  FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 0d41d68..2d6120c 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -28,7 +28,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
+       bool v;
+       const u32 *p = (const u32 *)addr;
+-      asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++      asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+       return v;
+ }
+@@ -39,7 +39,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
+ static inline void set_bit(int nr, void *addr)
+ {
+-      asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++      asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index e5612f3..e755d05 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -84,7 +84,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+       u16 seg;
+-      asm("movw %%ds,%0" : "=rm" (seg));
++      asm volatile("movw %%ds,%0" : "=rm" (seg));
+       return seg;
+ }
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 536ccfc..1295cc1f 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -35,6 +35,23 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONFIG_DEBUG_INFO
++ifdef CONFIG_DEBUG_INFO_SPLIT
++KBUILD_CFLAGS   += $(call cc-option, -gsplit-dwarf, -g)
++else
++KBUILD_CFLAGS += -g
++endif
++KBUILD_AFLAGS += -Wa,--gdwarf-2
++endif
++ifdef CONFIG_DEBUG_INFO_DWARF4
++KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
++endif
++
++ifdef CONFIG_DEBUG_INFO_REDUCED
++KBUILD_CFLAGS         += $(call cc-option, -femit-struct-debug-baseonly) \
++                 $(call cc-option,-fno-var-tracking)
++endif
++
+ KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE :=n
+diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
+index a53440e..c3dbf1e 100644
+--- a/arch/x86/boot/compressed/efi_stub_32.S
++++ b/arch/x86/boot/compressed/efi_stub_32.S
+@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
+        * parameter 2, ..., param n. To make things easy, we save the return
+        * address of efi_call_phys in a global variable.
+        */
+-      popl    %ecx
+-      movl    %ecx, saved_return_addr(%edx)
+-      /* get the function pointer into ECX*/
+-      popl    %ecx
+-      movl    %ecx, efi_rt_function_ptr(%edx)
++      popl    saved_return_addr(%edx)
++      popl    efi_rt_function_ptr(%edx)
+       /*
+        * 3. Call the physical function.
+        */
+-      call    *%ecx
++      call    *efi_rt_function_ptr(%edx)
+       /*
+        * 4. Balance the stack. And because EAX contain the return value,
+@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
+ 1:    popl    %edx
+       subl    $1b, %edx
+-      movl    efi_rt_function_ptr(%edx), %ecx
+-      pushl   %ecx
++      pushl   efi_rt_function_ptr(%edx)
+       /*
+        * 10. Push the saved return address onto the stack and return.
+        */
+-      movl    saved_return_addr(%edx), %ecx
+-      pushl   %ecx
+-      ret
++      jmpl    *saved_return_addr(%edx)
+ ENDPROC(efi_call_phys)
+ .previous
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+index 630384a..278e788 100644
+--- a/arch/x86/boot/compressed/efi_thunk_64.S
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -189,8 +189,8 @@ efi_gdt64:
+       .long   0                       /* Filled out by user */
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+-      .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+-      .quad   0x00cf92000000ffff      /* __KERNEL_DS */
++      .quad   0x00af9b000000ffff      /* __KERNEL_CS */
++      .quad   0x00cf93000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+ efi_gdt64_end:
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 1038524..b6acc21 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -169,10 +169,10 @@ preferred_addr:
+       addl    %eax, %ebx
+       notl    %eax
+       andl    %eax, %ebx
+-      cmpl    $LOAD_PHYSICAL_ADDR, %ebx
++      cmpl    $____LOAD_PHYSICAL_ADDR, %ebx
+       jge     1f
+ #endif
+-      movl    $LOAD_PHYSICAL_ADDR, %ebx
++      movl    $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+       /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 0d80a7a..ed3e0ff 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -103,10 +103,10 @@ ENTRY(startup_32)
+       addl    %eax, %ebx
+       notl    %eax
+       andl    %eax, %ebx
+-      cmpl    $LOAD_PHYSICAL_ADDR, %ebx
++      cmpl    $____LOAD_PHYSICAL_ADDR, %ebx
+       jge     1f
+ #endif
+-      movl    $LOAD_PHYSICAL_ADDR, %ebx
++      movl    $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+       /* Target address to relocate to for decompression */
+@@ -333,10 +333,10 @@ preferred_addr:
+       addq    %rax, %rbp
+       notq    %rax
+       andq    %rax, %rbp
+-      cmpq    $LOAD_PHYSICAL_ADDR, %rbp
++      cmpq    $____LOAD_PHYSICAL_ADDR, %rbp
+       jge     1f
+ #endif
+-      movq    $LOAD_PHYSICAL_ADDR, %rbp
++      movq    $____LOAD_PHYSICAL_ADDR, %rbp
+ 1:
+       /* Target address to relocate to for decompression */
+@@ -444,8 +444,8 @@ gdt:
+       .long   gdt
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+-      .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+-      .quad   0x00cf92000000ffff      /* __KERNEL_DS */
++      .quad   0x00af9b000000ffff      /* __KERNEL_CS */
++      .quad   0x00cf93000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+ gdt_end:
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index b3c5a5f0..596115e 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -176,13 +176,17 @@ static void handle_relocations(void *output, unsigned long output_len,
+       int *reloc;
+       unsigned long delta, map, ptr;
+       unsigned long min_addr = (unsigned long)output;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      unsigned long max_addr = min_addr + (VO___bss_start - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR);
++#else
+       unsigned long max_addr = min_addr + (VO___bss_start - VO__text);
++#endif
+       /*
+        * Calculate the delta between where vmlinux was linked to load
+        * and where it was actually loaded.
+        */
+-      delta = min_addr - LOAD_PHYSICAL_ADDR;
++      delta = min_addr - ____LOAD_PHYSICAL_ADDR;
+       /*
+        * The kernel contains a table of relocation addresses. Those
+@@ -199,7 +203,7 @@ static void handle_relocations(void *output, unsigned long output_len,
+        * from __START_KERNEL_map.
+        */
+       if (IS_ENABLED(CONFIG_X86_64))
+-              delta = virt_addr - LOAD_PHYSICAL_ADDR;
++              delta = virt_addr - ____LOAD_PHYSICAL_ADDR;
+       if (!delta) {
+               debug_putstr("No relocation needed... ");
+@@ -274,7 +278,7 @@ static void parse_elf(void *output)
+       Elf32_Ehdr ehdr;
+       Elf32_Phdr *phdrs, *phdr;
+ #endif
+-      void *dest;
++      void *dest, *prev;
+       int i;
+       memcpy(&ehdr, output, sizeof(ehdr));
+@@ -301,11 +305,14 @@ static void parse_elf(void *output)
+               case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+                       dest = output;
+-                      dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++                      dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+                       dest = (void *)(phdr->p_paddr);
+ #endif
+                       memmove(dest, output + phdr->p_offset, phdr->p_filesz);
++                      if (i)
++                              memset(prev, 0xff, dest - prev);
++                      prev = dest + phdr->p_filesz;
+                       break;
+               default: /* Ignore other PT_* */ break;
+               }
+@@ -337,7 +344,11 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+                                 unsigned char *output,
+                                 unsigned long output_len)
+ {
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      const unsigned long kernel_total_size = VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR;
++#else
+       const unsigned long kernel_total_size = VO__end - VO__text;
++#endif
+       unsigned long virt_addr = (unsigned long)output;
+       /* Retain x86 boot parameters pointer passed from startup_32/64. */
+@@ -395,7 +406,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+               error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+-      if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++      if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+               error("Destination address does not match LOAD_PHYSICAL_ADDR");
+       if ((unsigned long)output != virt_addr)
+               error("Destination virtual address changed when not relocatable");
+diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
+index 56589d0..f2085be 100644
+--- a/arch/x86/boot/compressed/pagetable.c
++++ b/arch/x86/boot/compressed/pagetable.c
+@@ -14,6 +14,7 @@
+  */
+ #define __pa(x)  ((unsigned long)(x))
+ #define __va(x)  ((void *)((unsigned long)(x)))
++#undef CONFIG_PAX_KERNEXEC
+ #include "misc.h"
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4ad7d70..c703963 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -126,9 +126,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+               u32 ecx = MSR_K7_HWCR;
+               u32 eax, edx;
+-              asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++              asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               eax &= ~(1 << 15);
+-              asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++              asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               get_cpuflags(); /* Make sure it really did something */
+               err = check_cpuflags();
+@@ -141,9 +141,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+               u32 ecx = MSR_VIA_FCR;
+               u32 eax, edx;
+-              asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++              asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               eax |= (1<<1)|(1<<7);
+-              asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++              asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               set_bit(X86_FEATURE_CX8, cpu.flags);
+               err = check_cpuflags();
+@@ -154,12 +154,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+               u32 eax, edx;
+               u32 level = 1;
+-              asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+-              asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+-              asm("cpuid"
++              asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++              asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++              asm volatile("cpuid"
+                   : "+a" (level), "=d" (cpu.flags[0])
+                   : : "ecx", "ebx");
+-              asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++              asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               err = check_cpuflags();
+       } else if (err == 0x01 &&
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 3dd5be3..16720a2 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -438,7 +438,7 @@ setup_data:                .quad 0                 # 64-bit physical pointer to
+                                               # single linked list of
+                                               # struct setup_data
+-pref_address:         .quad LOAD_PHYSICAL_ADDR        # preferred load addr
++pref_address:         .quad ____LOAD_PHYSICAL_ADDR    # preferred load addr
+ #
+ # Getting to provably safe in-place decompression is hard. Worst case
+@@ -543,7 +543,12 @@ pref_address:             .quad LOAD_PHYSICAL_ADDR        # preferred load addr
+ #define ZO_INIT_SIZE  (ZO__end - ZO_startup_32 + ZO_z_min_extract_offset)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VO_INIT_SIZE  (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
++#else
+ #define VO_INIT_SIZE  (VO__end - VO__text)
++#endif
++
+ #if ZO_INIT_SIZE > VO_INIT_SIZE
+ # define INIT_SIZE ZO_INIT_SIZE
+ #else
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index db75d07..8e6d0af 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+ static int detect_memory_e820(void)
+ {
+-      int count = 0;
++      unsigned int count = 0;
+       struct biosregs ireg, oreg;
+       struct e820entry *desc = boot_params.e820_map;
+       static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index ba3e100..6501b8f 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
+       boot_params.screen_info.vesapm_seg = oreg.es;
+       boot_params.screen_info.vesapm_off = oreg.di;
++      boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+ /*
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index 77780e3..86be0cb 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -100,7 +100,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+       char entry_buf[4];
+-      int i, len = 0;
++      unsigned int i, len = 0;
+       int key;
+       unsigned int v;
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 9105655..41779c1 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+  * including this sentence is retained in full.
+  */
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -70,6 +72,8 @@
+       je      B192;                   \
+       leaq    32(r9),r9;
++#define ret   pax_force_retaddr; ret
++
+ #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+       movq    r1,r2;                  \
+       movq    r3,r4;                  \
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 383a6f8..a4db591 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -32,6 +32,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ /*
+  * The following macros are used to move an (un)aligned 16 byte value to/from
+@@ -218,7 +219,7 @@ enc:        .octa 0x2
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -228,8 +229,8 @@ enc:        .octa 0x2
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+         MOVADQ     SHUF_MASK(%rip), %xmm14
+       mov        arg7, %r10           # %r10 = AAD
+-      mov        arg8, %r12           # %r12 = aadLen
+-      mov        %r12, %r11
++      mov        arg8, %r15           # %r15 = aadLen
++      mov        %r15, %r11
+       pxor       %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+@@ -238,17 +239,17 @@ _get_AAD_loop\num_initial_blocks\operation:
+       psrldq     $4, %xmm\i
+       pxor       \TMP1, %xmm\i
+       add        $4, %r10
+-      sub        $4, %r12
++      sub        $4, %r15
+       jne        _get_AAD_loop\num_initial_blocks\operation
+       cmp        $16, %r11
+       je         _get_AAD_loop2_done\num_initial_blocks\operation
+-      mov        $16, %r12
++      mov        $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+       psrldq     $4, %xmm\i
+-      sub        $4, %r12
+-      cmp        %r11, %r12
++      sub        $4, %r15
++      cmp        %r11, %r15
+       jne        _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -453,8 +454,8 @@ _initial_blocks_done\num_initial_blocks\operation:
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+         MOVADQ     SHUF_MASK(%rip), %xmm14
+       mov        arg7, %r10           # %r10 = AAD
+-      mov        arg8, %r12           # %r12 = aadLen
+-      mov        %r12, %r11
++      mov        arg8, %r15           # %r15 = aadLen
++      mov        %r15, %r11
+       pxor       %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+       movd       (%r10), \TMP1
+@@ -462,15 +463,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+       psrldq     $4, %xmm\i
+       pxor       \TMP1, %xmm\i
+       add        $4, %r10
+-      sub        $4, %r12
++      sub        $4, %r15
+       jne        _get_AAD_loop\num_initial_blocks\operation
+       cmp        $16, %r11
+       je         _get_AAD_loop2_done\num_initial_blocks\operation
+-      mov        $16, %r12
++      mov        $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+       psrldq     $4, %xmm\i
+-      sub        $4, %r12
+-      cmp        %r11, %r12
++      sub        $4, %r15
++      cmp        %r11, %r15
+       jne        _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+       PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+@@ -1280,8 +1281,8 @@ _esb_loop_\@:
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ *
+ *****************************************************************************/
+-ENTRY(aesni_gcm_dec)
+-      push    %r12
++RAP_ENTRY(aesni_gcm_dec)
++      push    %r15
+       push    %r13
+       push    %r14
+       mov     %rsp, %r14
+@@ -1291,8 +1292,8 @@ ENTRY(aesni_gcm_dec)
+ */
+       sub     $VARIABLE_OFFSET, %rsp
+       and     $~63, %rsp                        # align rsp to 64 bytes
+-      mov     %arg6, %r12
+-      movdqu  (%r12), %xmm13                    # %xmm13 = HashKey
++      mov     %arg6, %r15
++      movdqu  (%r15), %xmm13                    # %xmm13 = HashKey
+         movdqa  SHUF_MASK(%rip), %xmm2
+       PSHUFB_XMM %xmm2, %xmm13
+@@ -1320,10 +1321,10 @@ ENTRY(aesni_gcm_dec)
+       movdqa %xmm13, HashKey(%rsp)           # store HashKey<<1 (mod poly)
+       mov %arg4, %r13    # save the number of bytes of plaintext/ciphertext
+       and $-16, %r13                      # %r13 = %r13 - (%r13 mod 16)
+-      mov %r13, %r12
+-      and $(3<<4), %r12
++      mov %r13, %r15
++      and $(3<<4), %r15
+       jz _initial_num_blocks_is_0_decrypt
+-      cmp $(2<<4), %r12
++      cmp $(2<<4), %r15
+       jb _initial_num_blocks_is_1_decrypt
+       je _initial_num_blocks_is_2_decrypt
+ _initial_num_blocks_is_3_decrypt:
+@@ -1373,16 +1374,16 @@ _zero_cipher_left_decrypt:
+       sub $16, %r11
+       add %r13, %r11
+       movdqu (%arg3,%r11,1), %xmm1   # receive the last <16 byte block
+-      lea SHIFT_MASK+16(%rip), %r12
+-      sub %r13, %r12
++      lea SHIFT_MASK+16(%rip), %r15
++      sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+-      movdqu (%r12), %xmm2           # get the appropriate shuffle mask
++      movdqu (%r15), %xmm2           # get the appropriate shuffle mask
+       PSHUFB_XMM %xmm2, %xmm1            # right shift 16-%r13 butes
+       movdqa  %xmm1, %xmm2
+       pxor %xmm1, %xmm0            # Ciphertext XOR E(K, Yn)
+-      movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++      movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+       # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+       pand %xmm1, %xmm0            # mask out top 16-%r13 bytes of %xmm0
+       pand    %xmm1, %xmm2
+@@ -1411,9 +1412,9 @@ _less_than_8_bytes_left_decrypt:
+       sub     $1, %r13
+       jne     _less_than_8_bytes_left_decrypt
+ _multiple_of_16_bytes_decrypt:
+-      mov     arg8, %r12                # %r13 = aadLen (number of bytes)
+-      shl     $3, %r12                  # convert into number of bits
+-      movd    %r12d, %xmm15             # len(A) in %xmm15
++      mov     arg8, %r15                # %r13 = aadLen (number of bytes)
++      shl     $3, %r15                  # convert into number of bits
++      movd    %r15d, %xmm15             # len(A) in %xmm15
+       shl     $3, %arg4                 # len(C) in bits (*128)
+       MOVQ_R64_XMM    %arg4, %xmm1
+       pslldq  $8, %xmm15                # %xmm15 = len(A)||0x0000000000000000
+@@ -1452,7 +1453,8 @@ _return_T_done_decrypt:
+       mov     %r14, %rsp
+       pop     %r14
+       pop     %r13
+-      pop     %r12
++      pop     %r15
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_gcm_dec)
+@@ -1540,8 +1542,8 @@ ENDPROC(aesni_gcm_dec)
+ *
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+-ENTRY(aesni_gcm_enc)
+-      push    %r12
++RAP_ENTRY(aesni_gcm_enc)
++      push    %r15
+       push    %r13
+       push    %r14
+       mov     %rsp, %r14
+@@ -1551,8 +1553,8 @@ ENTRY(aesni_gcm_enc)
+ #
+       sub     $VARIABLE_OFFSET, %rsp
+       and     $~63, %rsp
+-      mov     %arg6, %r12
+-      movdqu  (%r12), %xmm13
++      mov     %arg6, %r15
++      movdqu  (%r15), %xmm13
+         movdqa  SHUF_MASK(%rip), %xmm2
+       PSHUFB_XMM %xmm2, %xmm13
+@@ -1576,13 +1578,13 @@ ENTRY(aesni_gcm_enc)
+       movdqa  %xmm13, HashKey(%rsp)
+       mov     %arg4, %r13            # %xmm13 holds HashKey<<1 (mod poly)
+       and     $-16, %r13
+-      mov     %r13, %r12
++      mov     %r13, %r15
+         # Encrypt first few blocks
+-      and     $(3<<4), %r12
++      and     $(3<<4), %r15
+       jz      _initial_num_blocks_is_0_encrypt
+-      cmp     $(2<<4), %r12
++      cmp     $(2<<4), %r15
+       jb      _initial_num_blocks_is_1_encrypt
+       je      _initial_num_blocks_is_2_encrypt
+ _initial_num_blocks_is_3_encrypt:
+@@ -1635,14 +1637,14 @@ _zero_cipher_left_encrypt:
+       sub $16, %r11
+       add %r13, %r11
+       movdqu (%arg3,%r11,1), %xmm1     # receive the last <16 byte blocks
+-      lea SHIFT_MASK+16(%rip), %r12
+-      sub %r13, %r12
++      lea SHIFT_MASK+16(%rip), %r15
++      sub %r13, %r15
+       # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+       # (%r13 is the number of bytes in plaintext mod 16)
+-      movdqu  (%r12), %xmm2           # get the appropriate shuffle mask
++      movdqu  (%r15), %xmm2           # get the appropriate shuffle mask
+       PSHUFB_XMM      %xmm2, %xmm1            # shift right 16-r13 byte
+       pxor    %xmm1, %xmm0            # Plaintext XOR Encrypt(K, Yn)
+-      movdqu  ALL_F-SHIFT_MASK(%r12), %xmm1
++      movdqu  ALL_F-SHIFT_MASK(%r15), %xmm1
+       # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+       pand    %xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
+         movdqa SHUF_MASK(%rip), %xmm10
+@@ -1675,9 +1677,9 @@ _less_than_8_bytes_left_encrypt:
+       sub $1, %r13
+       jne _less_than_8_bytes_left_encrypt
+ _multiple_of_16_bytes_encrypt:
+-      mov     arg8, %r12    # %r12 = addLen (number of bytes)
+-      shl     $3, %r12
+-      movd    %r12d, %xmm15       # len(A) in %xmm15
++      mov     arg8, %r15    # %r15 = addLen (number of bytes)
++      shl     $3, %r15
++      movd    %r15d, %xmm15       # len(A) in %xmm15
+       shl     $3, %arg4               # len(C) in bits (*128)
+       MOVQ_R64_XMM    %arg4, %xmm1
+       pslldq  $8, %xmm15          # %xmm15 = len(A)||0x0000000000000000
+@@ -1716,7 +1718,8 @@ _return_T_done_encrypt:
+       mov     %r14, %rsp
+       pop     %r14
+       pop     %r13
+-      pop     %r12
++      pop     %r15
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_gcm_enc)
+@@ -1734,6 +1737,7 @@ _key_expansion_256a:
+       pxor %xmm1, %xmm0
+       movaps %xmm0, (TKEYP)
+       add $0x10, TKEYP
++      pax_force_retaddr
+       ret
+ ENDPROC(_key_expansion_128)
+ ENDPROC(_key_expansion_256a)
+@@ -1760,6 +1764,7 @@ _key_expansion_192a:
+       shufps $0b01001110, %xmm2, %xmm1
+       movaps %xmm1, 0x10(TKEYP)
+       add $0x20, TKEYP
++      pax_force_retaddr
+       ret
+ ENDPROC(_key_expansion_192a)
+@@ -1780,6 +1785,7 @@ _key_expansion_192b:
+       movaps %xmm0, (TKEYP)
+       add $0x10, TKEYP
++      pax_force_retaddr
+       ret
+ ENDPROC(_key_expansion_192b)
+@@ -1793,6 +1799,7 @@ _key_expansion_256b:
+       pxor %xmm1, %xmm2
+       movaps %xmm2, (TKEYP)
+       add $0x10, TKEYP
++      pax_force_retaddr
+       ret
+ ENDPROC(_key_expansion_256b)
+@@ -1908,13 +1915,14 @@ ENTRY(aesni_set_key)
+       popl KEYP
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_set_key)
+ /*
+  * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+  */
+-ENTRY(aesni_enc)
++RAP_ENTRY(aesni_enc)
+       FRAME_BEGIN
+ #ifndef __x86_64__
+       pushl KEYP
+@@ -1932,6 +1940,7 @@ ENTRY(aesni_enc)
+       popl KEYP
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_enc)
+@@ -1990,6 +1999,7 @@ _aesni_enc1:
+       AESENC KEY STATE
+       movaps 0x70(TKEYP), KEY
+       AESENCLAST KEY STATE
++      pax_force_retaddr
+       ret
+ ENDPROC(_aesni_enc1)
+@@ -2099,13 +2109,14 @@ _aesni_enc4:
+       AESENCLAST KEY STATE2
+       AESENCLAST KEY STATE3
+       AESENCLAST KEY STATE4
++      pax_force_retaddr
+       ret
+ ENDPROC(_aesni_enc4)
+ /*
+  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+  */
+-ENTRY(aesni_dec)
++RAP_ENTRY(aesni_dec)
+       FRAME_BEGIN
+ #ifndef __x86_64__
+       pushl KEYP
+@@ -2124,6 +2135,7 @@ ENTRY(aesni_dec)
+       popl KEYP
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_dec)
+@@ -2182,6 +2194,7 @@ _aesni_dec1:
+       AESDEC KEY STATE
+       movaps 0x70(TKEYP), KEY
+       AESDECLAST KEY STATE
++      pax_force_retaddr
+       ret
+ ENDPROC(_aesni_dec1)
+@@ -2291,6 +2304,7 @@ _aesni_dec4:
+       AESDECLAST KEY STATE2
+       AESDECLAST KEY STATE3
+       AESDECLAST KEY STATE4
++      pax_force_retaddr
+       ret
+ ENDPROC(_aesni_dec4)
+@@ -2351,6 +2365,7 @@ ENTRY(aesni_ecb_enc)
+       popl LEN
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_ecb_enc)
+@@ -2412,6 +2427,7 @@ ENTRY(aesni_ecb_dec)
+       popl LEN
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_ecb_dec)
+@@ -2456,6 +2472,7 @@ ENTRY(aesni_cbc_enc)
+       popl IVP
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_cbc_enc)
+@@ -2549,6 +2566,7 @@ ENTRY(aesni_cbc_dec)
+       popl IVP
+ #endif
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_cbc_dec)
+@@ -2578,6 +2596,7 @@ _aesni_inc_init:
+       mov $1, TCTR_LOW
+       MOVQ_R64_XMM TCTR_LOW INC
+       MOVQ_R64_XMM CTR TCTR_LOW
++      pax_force_retaddr
+       ret
+ ENDPROC(_aesni_inc_init)
+@@ -2607,6 +2626,7 @@ _aesni_inc:
+ .Linc_low:
+       movaps CTR, IV
+       PSHUFB_XMM BSWAP_MASK IV
++      pax_force_retaddr
+       ret
+ ENDPROC(_aesni_inc)
+@@ -2614,7 +2634,7 @@ ENDPROC(_aesni_inc)
+  * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+  *                  size_t len, u8 *iv)
+  */
+-ENTRY(aesni_ctr_enc)
++RAP_ENTRY(aesni_ctr_enc)
+       FRAME_BEGIN
+       cmp $16, LEN
+       jb .Lctr_enc_just_ret
+@@ -2670,6 +2690,7 @@ ENTRY(aesni_ctr_enc)
+       movups IV, (IVP)
+ .Lctr_enc_just_ret:
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_ctr_enc)
+@@ -2798,6 +2819,7 @@ ENTRY(aesni_xts_crypt8)
+       movdqu STATE4, 0x70(OUTP)
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(aesni_xts_crypt8)
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 0ab5ee1..a5d431f 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -71,9 +71,9 @@ struct aesni_xts_ctx {
+ asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+                            unsigned int key_len);
+-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
++asmlinkage void aesni_enc(void *ctx, u8 *out,
+                         const u8 *in);
+-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
++asmlinkage void aesni_dec(void *ctx, u8 *out,
+                         const u8 *in);
+ asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len);
+diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+index 246c670..4fb7603 100644
+--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "blowfish-x86_64-asm.S"
+ .text
+@@ -149,13 +150,15 @@ ENTRY(__blowfish_enc_blk)
+       jnz .L__enc_xor;
+       write_block();
++      pax_force_retaddr
+       ret;
+ .L__enc_xor:
+       xor_block();
++      pax_force_retaddr
+       ret;
+ ENDPROC(__blowfish_enc_blk)
+-ENTRY(blowfish_dec_blk)
++RAP_ENTRY(blowfish_dec_blk)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
+       movq %r11, %rbp;
++      pax_force_retaddr
+       ret;
+ ENDPROC(blowfish_dec_blk)
+@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
+       popq %rbx;
+       popq %rbp;
++      pax_force_retaddr
+       ret;
+ .L__enc_xor4:
+@@ -341,10 +346,11 @@ ENTRY(__blowfish_enc_blk_4way)
+       popq %rbx;
+       popq %rbp;
++      pax_force_retaddr
+       ret;
+ ENDPROC(__blowfish_enc_blk_4way)
+-ENTRY(blowfish_dec_blk_4way)
++RAP_ENTRY(blowfish_dec_blk_4way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
+       popq %rbx;
+       popq %rbp;
++      pax_force_retaddr
+       ret;
+ ENDPROC(blowfish_dec_blk_4way)
+diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+index aa9e8bd..0b8def4 100644
+--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+@@ -17,6 +17,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+@@ -192,6 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+       roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+                 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
+                 %rcx, (%r9));
++      pax_force_retaddr
+       ret;
+ ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+@@ -200,6 +202,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+       roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
+                 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
+                 %rax, (%r9));
++      pax_force_retaddr
+       ret;
+ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+@@ -783,6 +786,7 @@ __camellia_enc_blk16:
+                   %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ .align 8
+@@ -870,6 +874,7 @@ __camellia_dec_blk16:
+                   %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ .align 8
+@@ -889,7 +894,7 @@ __camellia_dec_blk16:
+       jmp .Ldec_max24;
+ ENDPROC(__camellia_dec_blk16)
+-ENTRY(camellia_ecb_enc_16way)
++RAP_ENTRY(camellia_ecb_enc_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -911,10 +916,11 @@ ENTRY(camellia_ecb_enc_16way)
+                    %xmm8, %rsi);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_ecb_enc_16way)
+-ENTRY(camellia_ecb_dec_16way)
++RAP_ENTRY(camellia_ecb_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -941,10 +947,11 @@ ENTRY(camellia_ecb_dec_16way)
+                    %xmm8, %rsi);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_ecb_dec_16way)
+-ENTRY(camellia_cbc_dec_16way)
++RAP_ENTRY(camellia_cbc_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -992,6 +999,7 @@ ENTRY(camellia_cbc_dec_16way)
+                    %xmm8, %rsi);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_cbc_dec_16way)
+@@ -1001,7 +1009,7 @@ ENDPROC(camellia_cbc_dec_16way)
+       vpslldq $8, tmp, tmp; \
+       vpsubq tmp, x, x;
+-ENTRY(camellia_ctr_16way)
++RAP_ENTRY(camellia_ctr_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -1105,6 +1113,7 @@ ENTRY(camellia_ctr_16way)
+                    %xmm8, %rsi);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_ctr_16way)
+@@ -1249,10 +1258,11 @@ camellia_xts_crypt_16way:
+                    %xmm8, %rsi);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_xts_crypt_16way)
+-ENTRY(camellia_xts_enc_16way)
++RAP_ENTRY(camellia_xts_enc_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -1266,7 +1276,7 @@ ENTRY(camellia_xts_enc_16way)
+       jmp camellia_xts_crypt_16way;
+ ENDPROC(camellia_xts_enc_16way)
+-ENTRY(camellia_xts_dec_16way)
++RAP_ENTRY(camellia_xts_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+index 16186c1..3468f83 100644
+--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+@@ -12,6 +12,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+@@ -231,6 +232,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+       roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
+                 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
+                 %rcx, (%r9));
++      pax_force_retaddr
+       ret;
+ ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+@@ -239,6 +241,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+       roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
+                 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
+                 %rax, (%r9));
++      pax_force_retaddr
+       ret;
+ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+@@ -823,6 +826,7 @@ __camellia_enc_blk32:
+                   %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ .align 8
+@@ -910,6 +914,7 @@ __camellia_dec_blk32:
+                   %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ .align 8
+@@ -929,7 +934,7 @@ __camellia_dec_blk32:
+       jmp .Ldec_max24;
+ ENDPROC(__camellia_dec_blk32)
+-ENTRY(camellia_ecb_enc_32way)
++RAP_ENTRY(camellia_ecb_enc_32way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (32 blocks)
+@@ -955,10 +960,11 @@ ENTRY(camellia_ecb_enc_32way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_ecb_enc_32way)
+-ENTRY(camellia_ecb_dec_32way)
++RAP_ENTRY(camellia_ecb_dec_32way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (32 blocks)
+@@ -989,10 +995,11 @@ ENTRY(camellia_ecb_dec_32way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_ecb_dec_32way)
+-ENTRY(camellia_cbc_dec_32way)
++RAP_ENTRY(camellia_cbc_dec_32way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (32 blocks)
+@@ -1057,6 +1064,7 @@ ENTRY(camellia_cbc_dec_32way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_cbc_dec_32way)
+@@ -1074,7 +1082,7 @@ ENDPROC(camellia_cbc_dec_32way)
+       vpslldq $8, tmp1, tmp1; \
+       vpsubq tmp1, x, x;
+-ENTRY(camellia_ctr_32way)
++RAP_ENTRY(camellia_ctr_32way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (32 blocks)
+@@ -1197,6 +1205,7 @@ ENTRY(camellia_ctr_32way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_ctr_32way)
+@@ -1364,10 +1373,11 @@ camellia_xts_crypt_32way:
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_xts_crypt_32way)
+-ENTRY(camellia_xts_enc_32way)
++RAP_ENTRY(camellia_xts_enc_32way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (32 blocks)
+@@ -1382,7 +1392,7 @@ ENTRY(camellia_xts_enc_32way)
+       jmp camellia_xts_crypt_32way;
+ ENDPROC(camellia_xts_enc_32way)
+-ENTRY(camellia_xts_dec_32way)
++RAP_ENTRY(camellia_xts_dec_32way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (32 blocks)
+diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
+index 310319c..9253a8f 100644
+--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
++++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "camellia-x86_64-asm_64.S"
+ .text
+@@ -228,16 +229,18 @@ ENTRY(__camellia_enc_blk)
+       enc_outunpack(mov, RT1);
+       movq RRBP, %rbp;
++      pax_force_retaddr
+       ret;
+ .L__enc_xor:
+       enc_outunpack(xor, RT1);
+       movq RRBP, %rbp;
++      pax_force_retaddr
+       ret;
+ ENDPROC(__camellia_enc_blk)
+-ENTRY(camellia_dec_blk)
++RAP_ENTRY(camellia_dec_blk)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
+       dec_outunpack();
+       movq RRBP, %rbp;
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_dec_blk)
+@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
+       movq RRBP, %rbp;
+       popq %rbx;
++      pax_force_retaddr
+       ret;
+ .L__enc2_xor:
+@@ -470,10 +475,11 @@ ENTRY(__camellia_enc_blk_2way)
+       movq RRBP, %rbp;
+       popq %rbx;
++      pax_force_retaddr
+       ret;
+ ENDPROC(__camellia_enc_blk_2way)
+-ENTRY(camellia_dec_blk_2way)
++RAP_ENTRY(camellia_dec_blk_2way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
+       movq RRBP, %rbp;
+       movq RXOR, %rbx;
++      pax_force_retaddr
+       ret;
+ ENDPROC(camellia_dec_blk_2way)
+diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+index 60907c1..fe8638d 100644
+--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
+@@ -27,20 +27,20 @@
+ #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
+ /* 32-way AVX2/AES-NI parallel cipher functions */
+-asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_ecb_enc_32way(void *ctx, u8 *dst,
+                                      const u8 *src);
+-asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_ecb_dec_32way(void *ctx, u8 *dst,
+                                      const u8 *src);
+-asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_cbc_dec_32way(void *ctx, u8 *dst,
+                                      const u8 *src);
+-asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst,
+-                                 const u8 *src, le128 *iv);
++asmlinkage void camellia_ctr_32way(void *ctx, u128 *dst,
++                                 const u128 *src, le128 *iv);
+-asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst,
+-                                     const u8 *src, le128 *iv);
+-asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst,
+-                                     const u8 *src, le128 *iv);
++asmlinkage void camellia_xts_enc_32way(void *ctx, u128 *dst,
++                                     const u128 *src, le128 *iv);
++asmlinkage void camellia_xts_dec_32way(void *ctx, u128 *dst,
++                                     const u128 *src, le128 *iv);
+ static const struct common_glue_ctx camellia_enc = {
+       .num_funcs = 4,
+diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
+index d96429d..18ab2e6 100644
+--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
+@@ -26,28 +26,28 @@
+ #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
+ /* 16-way parallel cipher functions (avx/aes-ni) */
+-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_ecb_enc_16way(void *ctx, u8 *dst,
+                                      const u8 *src);
+ EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way);
+-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_ecb_dec_16way(void *ctx, u8 *dst,
+                                      const u8 *src);
+ EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
+-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_cbc_dec_16way(void *ctx, u8 *dst,
+                                      const u8 *src);
+ EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
+-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
+-                                 const u8 *src, le128 *iv);
++asmlinkage void camellia_ctr_16way(void *ctx, u128 *dst,
++                                 const u128 *src, le128 *iv);
+ EXPORT_SYMBOL_GPL(camellia_ctr_16way);
+-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+-                                     const u8 *src, le128 *iv);
++asmlinkage void camellia_xts_enc_16way(void *ctx, u128 *dst,
++                                     const u128 *src, le128 *iv);
+ EXPORT_SYMBOL_GPL(camellia_xts_enc_16way);
+-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+-                                     const u8 *src, le128 *iv);
++asmlinkage void camellia_xts_dec_16way(void *ctx, u128 *dst,
++                                     const u128 *src, le128 *iv);
+ EXPORT_SYMBOL_GPL(camellia_xts_dec_16way);
+ void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
+index aa76cad..ffd8808 100644
+--- a/arch/x86/crypto/camellia_glue.c
++++ b/arch/x86/crypto/camellia_glue.c
+@@ -39,7 +39,7 @@
+ asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+                                  const u8 *src, bool xor);
+ EXPORT_SYMBOL_GPL(__camellia_enc_blk);
+-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_dec_blk(void *ctx, u8 *dst,
+                                const u8 *src);
+ EXPORT_SYMBOL_GPL(camellia_dec_blk);
+@@ -47,7 +47,7 @@ EXPORT_SYMBOL_GPL(camellia_dec_blk);
+ asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+                                       const u8 *src, bool xor);
+ EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
+-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_dec_blk_2way(void *ctx, u8 *dst,
+                                     const u8 *src);
+ EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
+@@ -1279,8 +1279,10 @@ static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+                                &tfm->crt_flags);
+ }
+-void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
++void camellia_decrypt_cbc_2way(void *ctx, u8 *_dst, const u8 *_src)
+ {
++      u128 *dst = (u128 *)_dst;
++      u128 *src = (u128 *)_src;
+       u128 iv = *src;
+       camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
+diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+index 14fa196..5de8a4a 100644
+--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ .file "cast5-avx-x86_64-asm_64.S"
+@@ -282,6 +283,7 @@ __cast5_enc_blk16:
+       outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+       outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__cast5_enc_blk16)
+@@ -353,6 +355,7 @@ __cast5_dec_blk16:
+       outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+       outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
++      pax_force_retaddr
+       ret;
+ .L__skip_dec:
+@@ -360,7 +363,7 @@ __cast5_dec_blk16:
+       jmp .L__dec_tail;
+ ENDPROC(__cast5_dec_blk16)
+-ENTRY(cast5_ecb_enc_16way)
++RAP_ENTRY(cast5_ecb_enc_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -391,10 +394,11 @@ ENTRY(cast5_ecb_enc_16way)
+       vmovdqu RL4, (7*4*4)(%r11);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_ecb_enc_16way)
+-ENTRY(cast5_ecb_dec_16way)
++RAP_ENTRY(cast5_ecb_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -425,6 +429,7 @@ ENTRY(cast5_ecb_dec_16way)
+       vmovdqu RL4, (7*4*4)(%r11);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_ecb_dec_16way)
+@@ -436,10 +441,10 @@ ENTRY(cast5_cbc_dec_16way)
+        */
+       FRAME_BEGIN
+-      pushq %r12;
++      pushq %r14;
+       movq %rsi, %r11;
+-      movq %rdx, %r12;
++      movq %rdx, %r14;
+       vmovdqu (0*16)(%rdx), RL1;
+       vmovdqu (1*16)(%rdx), RR1;
+@@ -453,16 +458,16 @@ ENTRY(cast5_cbc_dec_16way)
+       call __cast5_dec_blk16;
+       /* xor with src */
+-      vmovq (%r12), RX;
++      vmovq (%r14), RX;
+       vpshufd $0x4f, RX, RX;
+       vpxor RX, RR1, RR1;
+-      vpxor 0*16+8(%r12), RL1, RL1;
+-      vpxor 1*16+8(%r12), RR2, RR2;
+-      vpxor 2*16+8(%r12), RL2, RL2;
+-      vpxor 3*16+8(%r12), RR3, RR3;
+-      vpxor 4*16+8(%r12), RL3, RL3;
+-      vpxor 5*16+8(%r12), RR4, RR4;
+-      vpxor 6*16+8(%r12), RL4, RL4;
++      vpxor 0*16+8(%r14), RL1, RL1;
++      vpxor 1*16+8(%r14), RR2, RR2;
++      vpxor 2*16+8(%r14), RL2, RL2;
++      vpxor 3*16+8(%r14), RR3, RR3;
++      vpxor 4*16+8(%r14), RL3, RL3;
++      vpxor 5*16+8(%r14), RR4, RR4;
++      vpxor 6*16+8(%r14), RL4, RL4;
+       vmovdqu RR1, (0*16)(%r11);
+       vmovdqu RL1, (1*16)(%r11);
+@@ -473,9 +478,10 @@ ENTRY(cast5_cbc_dec_16way)
+       vmovdqu RR4, (6*16)(%r11);
+       vmovdqu RL4, (7*16)(%r11);
+-      popq %r12;
++      popq %r14;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_cbc_dec_16way)
+@@ -488,10 +494,10 @@ ENTRY(cast5_ctr_16way)
+        */
+       FRAME_BEGIN
+-      pushq %r12;
++      pushq %r14;
+       movq %rsi, %r11;
+-      movq %rdx, %r12;
++      movq %rdx, %r14;
+       vpcmpeqd RTMP, RTMP, RTMP;
+       vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
+@@ -531,14 +537,14 @@ ENTRY(cast5_ctr_16way)
+       call __cast5_enc_blk16;
+       /* dst = src ^ iv */
+-      vpxor (0*16)(%r12), RR1, RR1;
+-      vpxor (1*16)(%r12), RL1, RL1;
+-      vpxor (2*16)(%r12), RR2, RR2;
+-      vpxor (3*16)(%r12), RL2, RL2;
+-      vpxor (4*16)(%r12), RR3, RR3;
+-      vpxor (5*16)(%r12), RL3, RL3;
+-      vpxor (6*16)(%r12), RR4, RR4;
+-      vpxor (7*16)(%r12), RL4, RL4;
++      vpxor (0*16)(%r14), RR1, RR1;
++      vpxor (1*16)(%r14), RL1, RL1;
++      vpxor (2*16)(%r14), RR2, RR2;
++      vpxor (3*16)(%r14), RL2, RL2;
++      vpxor (4*16)(%r14), RR3, RR3;
++      vpxor (5*16)(%r14), RL3, RL3;
++      vpxor (6*16)(%r14), RR4, RR4;
++      vpxor (7*16)(%r14), RL4, RL4;
+       vmovdqu RR1, (0*16)(%r11);
+       vmovdqu RL1, (1*16)(%r11);
+       vmovdqu RR2, (2*16)(%r11);
+@@ -548,8 +554,9 @@ ENTRY(cast5_ctr_16way)
+       vmovdqu RR4, (6*16)(%r11);
+       vmovdqu RL4, (7*16)(%r11);
+-      popq %r12;
++      popq %r14;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast5_ctr_16way)
+diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+index c419389..b853452 100644
+--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+ .file "cast6-avx-x86_64-asm_64.S"
+@@ -296,6 +297,7 @@ __cast6_enc_blk8:
+       outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+       outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__cast6_enc_blk8)
+@@ -341,10 +343,11 @@ __cast6_dec_blk8:
+       outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+       outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__cast6_dec_blk8)
+-ENTRY(cast6_ecb_enc_8way)
++RAP_ENTRY(cast6_ecb_enc_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -361,10 +364,11 @@ ENTRY(cast6_ecb_enc_8way)
+       store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_ecb_enc_8way)
+-ENTRY(cast6_ecb_dec_8way)
++RAP_ENTRY(cast6_ecb_dec_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -381,10 +385,11 @@ ENTRY(cast6_ecb_dec_8way)
+       store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_ecb_dec_8way)
+-ENTRY(cast6_cbc_dec_8way)
++RAP_ENTRY(cast6_cbc_dec_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -392,24 +397,25 @@ ENTRY(cast6_cbc_dec_8way)
+        */
+       FRAME_BEGIN
+-      pushq %r12;
++      pushq %r14;
+       movq %rsi, %r11;
+-      movq %rdx, %r12;
++      movq %rdx, %r14;
+       load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       call __cast6_dec_blk8;
+-      store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+-      popq %r12;
++      popq %r14;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_cbc_dec_8way)
+-ENTRY(cast6_ctr_8way)
++RAP_ENTRY(cast6_ctr_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -418,25 +424,26 @@ ENTRY(cast6_ctr_8way)
+        */
+       FRAME_BEGIN
+-      pushq %r12;
++      pushq %r14;
+       movq %rsi, %r11;
+-      movq %rdx, %r12;
++      movq %rdx, %r14;
+       load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+                     RD2, RX, RKR, RKM);
+       call __cast6_enc_blk8;
+-      store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+-      popq %r12;
++      popq %r14;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_ctr_8way)
+-ENTRY(cast6_xts_enc_8way)
++RAP_ENTRY(cast6_xts_enc_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -457,10 +464,11 @@ ENTRY(cast6_xts_enc_8way)
+       store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_xts_enc_8way)
+-ENTRY(cast6_xts_dec_8way)
++RAP_ENTRY(cast6_xts_dec_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -481,5 +489,6 @@ ENTRY(cast6_xts_dec_8way)
+       store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(cast6_xts_dec_8way)
+diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
+index 50e6847..bf7c2d8 100644
+--- a/arch/x86/crypto/cast6_avx_glue.c
++++ b/arch/x86/crypto/cast6_avx_glue.c
+@@ -41,20 +41,20 @@
+ #define CAST6_PARALLEL_BLOCKS 8
+-asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
++asmlinkage void cast6_ecb_enc_8way(void *ctx, u8 *dst,
+                                  const u8 *src);
+-asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
++asmlinkage void cast6_ecb_dec_8way(void *ctx, u8 *dst,
+                                  const u8 *src);
+-asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
++asmlinkage void cast6_cbc_dec_8way(void *ctx, u8 *dst,
+                                  const u8 *src);
+-asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
++asmlinkage void cast6_ctr_8way(void *ctx, u128 *dst, const u128 *src,
+                              le128 *iv);
+-asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
+-                                 const u8 *src, le128 *iv);
+-asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
+-                                 const u8 *src, le128 *iv);
++asmlinkage void cast6_xts_enc_8way(void *ctx, u128 *dst,
++                                 const u128 *src, le128 *iv);
++asmlinkage void cast6_xts_dec_8way(void *ctx, u128 *dst,
++                                 const u128 *src, le128 *iv);
+ static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+ {
+diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
+index f247304..b500391 100644
+--- a/arch/x86/crypto/crc32-pclmul_asm.S
++++ b/arch/x86/crypto/crc32-pclmul_asm.S
+@@ -102,6 +102,12 @@
+  *                         size_t len, uint crc32)
+  */
++#ifndef __x86_64__
++__i686_get_pc_thunk_cx:
++      mov     (%esp),%ecx
++      ret
++#endif
++
+ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
+       movdqa  (BUF), %xmm1
+       movdqa  0x10(BUF), %xmm2
+@@ -113,9 +119,8 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
+       add     $0x40, BUF
+ #ifndef __x86_64__
+       /* This is for position independent code(-fPIC) support for 32bit */
+-      call    delta
++      call    __i686_get_pc_thunk_cx
+ delta:
+-      pop     %ecx
+ #endif
+       cmp     $0x40, LEN
+       jb      less_64
+@@ -123,7 +128,7 @@ delta:
+ #ifdef __x86_64__
+       movdqa .Lconstant_R2R1(%rip), CONSTANT
+ #else
+-      movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
++      movdqa %cs:.Lconstant_R2R1 - delta (%ecx), CONSTANT
+ #endif
+ loop_64:/*  64 bytes Full cache line folding */
+@@ -172,7 +177,7 @@ less_64:/*  Folding cache line into 128bit */
+ #ifdef __x86_64__
+       movdqa  .Lconstant_R4R3(%rip), CONSTANT
+ #else
+-      movdqa  .Lconstant_R4R3 - delta(%ecx), CONSTANT
++      movdqa  %cs:.Lconstant_R4R3 - delta(%ecx), CONSTANT
+ #endif
+       prefetchnta     (BUF)
+@@ -220,8 +225,8 @@ fold_64:
+       movdqa  .Lconstant_R5(%rip), CONSTANT
+       movdqa  .Lconstant_mask32(%rip), %xmm3
+ #else
+-      movdqa  .Lconstant_R5 - delta(%ecx), CONSTANT
+-      movdqa  .Lconstant_mask32 - delta(%ecx), %xmm3
++      movdqa  %cs:.Lconstant_R5 - delta(%ecx), CONSTANT
++      movdqa  %cs:.Lconstant_mask32 - delta(%ecx), %xmm3
+ #endif
+       psrldq  $0x04, %xmm2
+       pand    %xmm3, %xmm1
+@@ -232,7 +237,7 @@ fold_64:
+ #ifdef __x86_64__
+       movdqa  .Lconstant_RUpoly(%rip), CONSTANT
+ #else
+-      movdqa  .Lconstant_RUpoly - delta(%ecx), CONSTANT
++      movdqa  %cs:.Lconstant_RUpoly - delta(%ecx), CONSTANT
+ #endif
+       movdqa  %xmm1, %xmm2
+       pand    %xmm3, %xmm1
+diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+index dc05f010..23c8bfd 100644
+--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+@@ -45,6 +45,7 @@
+ #include <asm/inst.h>
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
+@@ -309,6 +310,7 @@ do_return:
+       popq    %rsi
+       popq    %rdi
+       popq    %rbx
++      pax_force_retaddr
+         ret
+ ENDPROC(crc_pcl)
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+index eed55c8..b354187 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+@@ -19,6 +19,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ .data
+@@ -90,6 +91,7 @@ __clmul_gf128mul_ble:
+       psrlq $1, T2
+       pxor T2, T1
+       pxor T1, DATA
++      pax_force_retaddr
+       ret
+ ENDPROC(__clmul_gf128mul_ble)
+@@ -104,6 +106,7 @@ ENTRY(clmul_ghash_mul)
+       PSHUFB_XMM BSWAP DATA
+       movups DATA, (%rdi)
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(clmul_ghash_mul)
+@@ -133,5 +136,6 @@ ENTRY(clmul_ghash_update)
+       movups DATA, (%rdi)
+ .Lupdate_just_ret:
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(clmul_ghash_update)
+diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
+index 6a85598..fed2ada 100644
+--- a/arch/x86/crypto/glue_helper.c
++++ b/arch/x86/crypto/glue_helper.c
+@@ -165,7 +165,7 @@ __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+                               src -= num_blocks - 1;
+                               dst -= num_blocks - 1;
+-                              gctx->funcs[i].fn_u.cbc(ctx, dst, src);
++                              gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, (u8 *)src);
+                               nbytes -= bsize;
+                               if (nbytes < bsize)
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+index 9279e0b..c4b3d2c 100644
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -1,4 +1,5 @@
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ # enter salsa20_encrypt_bytes
+ ENTRY(salsa20_encrypt_bytes)
+@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
+       add     %r11,%rsp
+       mov     %rdi,%rax
+       mov     %rsi,%rdx
++      pax_force_retaddr
+       ret
+ #   bytesatleast65:
+ ._bytesatleast65:
+@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
+       add     %r11,%rsp
+       mov     %rdi,%rax
+       mov     %rsi,%rdx
++      pax_force_retaddr
+       ret
+ ENDPROC(salsa20_keysetup)
+@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
+       add     %r11,%rsp
+       mov     %rdi,%rax
+       mov     %rsi,%rdx
++      pax_force_retaddr
+       ret
+ ENDPROC(salsa20_ivsetup)
+diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+index 8be5718..d2bcbcd 100644
+--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+ .file "serpent-avx-x86_64-asm_64.S"
+@@ -619,6 +620,7 @@ __serpent_enc_blk8_avx:
+       write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_enc_blk8_avx)
+@@ -673,10 +675,11 @@ __serpent_dec_blk8_avx:
+       write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+       write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_dec_blk8_avx)
+-ENTRY(serpent_ecb_enc_8way_avx)
++RAP_ENTRY(serpent_ecb_enc_8way_avx)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -691,10 +694,11 @@ ENTRY(serpent_ecb_enc_8way_avx)
+       store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_enc_8way_avx)
+-ENTRY(serpent_ecb_dec_8way_avx)
++RAP_ENTRY(serpent_ecb_dec_8way_avx)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -709,10 +713,11 @@ ENTRY(serpent_ecb_dec_8way_avx)
+       store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_dec_8way_avx)
+-ENTRY(serpent_cbc_dec_8way_avx)
++RAP_ENTRY(serpent_cbc_dec_8way_avx)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -727,10 +732,11 @@ ENTRY(serpent_cbc_dec_8way_avx)
+       store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_cbc_dec_8way_avx)
+-ENTRY(serpent_ctr_8way_avx)
++RAP_ENTRY(serpent_ctr_8way_avx)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -747,10 +753,11 @@ ENTRY(serpent_ctr_8way_avx)
+       store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ctr_8way_avx)
+-ENTRY(serpent_xts_enc_8way_avx)
++RAP_ENTRY(serpent_xts_enc_8way_avx)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -769,10 +776,11 @@ ENTRY(serpent_xts_enc_8way_avx)
+       store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_enc_8way_avx)
+-ENTRY(serpent_xts_dec_8way_avx)
++RAP_ENTRY(serpent_xts_dec_8way_avx)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -791,5 +799,6 @@ ENTRY(serpent_xts_dec_8way_avx)
+       store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_dec_8way_avx)
+diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
+index 97c48ad..25416de 100644
+--- a/arch/x86/crypto/serpent-avx2-asm_64.S
++++ b/arch/x86/crypto/serpent-avx2-asm_64.S
+@@ -16,6 +16,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx2.S"
+ .file "serpent-avx2-asm_64.S"
+@@ -611,6 +612,7 @@ __serpent_enc_blk16:
+       write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_enc_blk16)
+@@ -665,10 +667,11 @@ __serpent_dec_blk16:
+       write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+       write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_dec_blk16)
+-ENTRY(serpent_ecb_enc_16way)
++RAP_ENTRY(serpent_ecb_enc_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -687,10 +690,11 @@ ENTRY(serpent_ecb_enc_16way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_enc_16way)
+-ENTRY(serpent_ecb_dec_16way)
++RAP_ENTRY(serpent_ecb_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -709,10 +713,11 @@ ENTRY(serpent_ecb_dec_16way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ecb_dec_16way)
+-ENTRY(serpent_cbc_dec_16way)
++RAP_ENTRY(serpent_cbc_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -732,10 +737,11 @@ ENTRY(serpent_cbc_dec_16way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_cbc_dec_16way)
+-ENTRY(serpent_ctr_16way)
++RAP_ENTRY(serpent_ctr_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -757,10 +763,11 @@ ENTRY(serpent_ctr_16way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_ctr_16way)
+-ENTRY(serpent_xts_enc_16way)
++RAP_ENTRY(serpent_xts_enc_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -783,10 +790,11 @@ ENTRY(serpent_xts_enc_16way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_enc_16way)
+-ENTRY(serpent_xts_dec_16way)
++RAP_ENTRY(serpent_xts_dec_16way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst (16 blocks)
+@@ -809,5 +817,6 @@ ENTRY(serpent_xts_dec_16way)
+       vzeroupper;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_xts_dec_16way)
+diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+index acc066c..1559cc4 100644
+--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
++++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "serpent-sse2-x86_64-asm_64.S"
+ .text
+@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
+       write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ .L__enc_xor8:
+       xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+       xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__serpent_enc_blk_8way)
+@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
+       write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+       write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(serpent_dec_blk_8way)
+diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
+index 870f6d8..9fed18e 100644
+--- a/arch/x86/crypto/serpent_avx2_glue.c
++++ b/arch/x86/crypto/serpent_avx2_glue.c
+@@ -27,18 +27,18 @@
+ #define SERPENT_AVX2_PARALLEL_BLOCKS 16
+ /* 16-way AVX2 parallel cipher functions */
+-asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_ecb_enc_16way(void *ctx, u8 *dst,
+                                     const u8 *src);
+-asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_ecb_dec_16way(void *ctx, u8 *dst,
+                                     const u8 *src);
+-asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
++asmlinkage void serpent_cbc_dec_16way(void *ctx, u8 *dst, const u8 *src);
+ asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
+                                 le128 *iv);
+-asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
+-                                    const u8 *src, le128 *iv);
+-asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
+-                                    const u8 *src, le128 *iv);
++asmlinkage void serpent_xts_enc_16way(void *ctx, u128 *dst,
++                                    const u128 *src, le128 *iv);
++asmlinkage void serpent_xts_dec_16way(void *ctx, u128 *dst,
++                                    const u128 *src, le128 *iv);
+ static const struct common_glue_ctx serpent_enc = {
+       .num_funcs = 3,
+diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
+index 6f778d3..3cf277e 100644
+--- a/arch/x86/crypto/serpent_avx_glue.c
++++ b/arch/x86/crypto/serpent_avx_glue.c
+@@ -41,28 +41,28 @@
+ #include <asm/crypto/glue_helper.h>
+ /* 8-way parallel cipher functions */
+-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_ecb_enc_8way_avx(void  *ctx, u8 *dst,
+                                        const u8 *src);
+ EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
+-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_ecb_dec_8way_avx(void *ctx, u8 *dst,
+                                        const u8 *src);
+ EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
+-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_cbc_dec_8way_avx(void *ctx, u8 *dst,
+                                        const u8 *src);
+ EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
+-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+-                                   const u8 *src, le128 *iv);
++asmlinkage void serpent_ctr_8way_avx(void *ctx, u128 *dst,
++                                   const u128 *src, le128 *iv);
+ EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
+-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+-                                       const u8 *src, le128 *iv);
++asmlinkage void serpent_xts_enc_8way_avx(void *ctx, u128 *dst,
++                                       const u128 *src, le128 *iv);
+ EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
+-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+-                                       const u8 *src, le128 *iv);
++asmlinkage void serpent_xts_dec_8way_avx(void *ctx, u128 *dst,
++                                       const u128 *src, le128 *iv);
+ EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
+ void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
+index 644f97a..4d069a1 100644
+--- a/arch/x86/crypto/serpent_sse2_glue.c
++++ b/arch/x86/crypto/serpent_sse2_glue.c
+@@ -45,8 +45,10 @@
+ #include <asm/crypto/serpent-sse2.h>
+ #include <asm/crypto/glue_helper.h>
+-static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
++static void serpent_decrypt_cbc_xway(void *ctx, u8 *_dst, const u8 *_src)
+ {
++      u128 *dst = (u128 *)_dst;
++      const u128 *src = (const u128 *)_src;
+       u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
+       unsigned int j;
+diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+index 96df6a3..8519a8f 100644
+--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+@@ -103,7 +103,7 @@ offset = \_offset
+ # JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
+ # arg 1 : rcx : state
+-ENTRY(sha1_mb_mgr_flush_avx2)
++RAP_ENTRY(sha1_mb_mgr_flush_avx2)
+       FRAME_BEGIN
+       push    %rbx
+@@ -226,7 +226,7 @@ ENDPROC(sha1_mb_mgr_flush_avx2)
+ #################################################################
+ .align 16
+-ENTRY(sha1_mb_mgr_get_comp_job_avx2)
++RAP_ENTRY(sha1_mb_mgr_get_comp_job_avx2)
+       push    %rbx
+       ## if bit 32+3 is set, then all lanes are empty
+diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+index 63a0d9c..a6038fd 100644
+--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
++++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+@@ -98,7 +98,7 @@ lane_data       = %r10
+ # JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
+ # arg 1 : rcx : state
+ # arg 2 : rdx : job
+-ENTRY(sha1_mb_mgr_submit_avx2)
++RAP_ENTRY(sha1_mb_mgr_submit_avx2)
+       FRAME_BEGIN
+       push    %rbx
+       push    %r12
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index a410950..02d2056 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -29,6 +29,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #define CTX   %rdi    // arg1
+ #define BUF   %rsi    // arg2
+@@ -71,13 +72,14 @@
+  * param: function's name
+  */
+ .macro SHA1_VECTOR_ASM  name
+-      ENTRY(\name)
++ALIGN
++      RAP_ENTRY(\name)
+       push    %rbx
+       push    %rbp
+-      push    %r12
++      push    %r14
+-      mov     %rsp, %r12
++      mov     %rsp, %r14
+       sub     $64, %rsp               # allocate workspace
+       and     $~15, %rsp              # align stack
+@@ -99,11 +101,12 @@
+       xor     %rax, %rax
+       rep stosq
+-      mov     %r12, %rsp              # deallocate workspace
++      mov     %r14, %rsp              # deallocate workspace
+-      pop     %r12
++      pop     %r14
+       pop     %rbp
+       pop     %rbx
++      pax_force_retaddr
+       ret
+       ENDPROC(\name)
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index fc61739..03f7efe 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -31,8 +31,8 @@
+ #include <crypto/sha1_base.h>
+ #include <asm/fpu/api.h>
+-typedef void (sha1_transform_fn)(u32 *digest, const char *data,
+-                              unsigned int rounds);
++typedef void (sha1_transform_fn)(struct sha1_state *digest, const u8 *data,
++                              int rounds);
+ static int sha1_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len, sha1_transform_fn *sha1_xform)
+@@ -47,8 +47,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
+       BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
+       kernel_fpu_begin();
+-      sha1_base_do_update(desc, data, len,
+-                          (sha1_block_fn *)sha1_xform);
++      sha1_base_do_update(desc, data, len, sha1_xform);
+       kernel_fpu_end();
+       return 0;
+@@ -62,29 +61,26 @@ static int sha1_finup(struct shash_desc *desc, const u8 *data,
+       kernel_fpu_begin();
+       if (len)
+-              sha1_base_do_update(desc, data, len,
+-                                  (sha1_block_fn *)sha1_xform);
+-      sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform);
++              sha1_base_do_update(desc, data, len, sha1_xform);
++      sha1_base_do_finalize(desc, sha1_xform);
+       kernel_fpu_end();
+       return sha1_base_finish(desc, out);
+ }
+-asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
+-                                   unsigned int rounds);
++asmlinkage void sha1_transform_ssse3(struct sha1_state *digest, const u8 *data,
++                                   int rounds);
+ static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+ {
+-      return sha1_update(desc, data, len,
+-                      (sha1_transform_fn *) sha1_transform_ssse3);
++      return sha1_update(desc, data, len, sha1_transform_ssse3);
+ }
+ static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
+                             unsigned int len, u8 *out)
+ {
+-      return sha1_finup(desc, data, len, out,
+-                      (sha1_transform_fn *) sha1_transform_ssse3);
++      return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
+ }
+ /* Add padding and return the message digest. */
+@@ -124,21 +120,19 @@ static void unregister_sha1_ssse3(void)
+ }
+ #ifdef CONFIG_AS_AVX
+-asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
+-                                 unsigned int rounds);
++asmlinkage void sha1_transform_avx(struct sha1_state *digest, const u8 *data,
++                                 int rounds);
+ static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+ {
+-      return sha1_update(desc, data, len,
+-                      (sha1_transform_fn *) sha1_transform_avx);
++      return sha1_update(desc, data, len, sha1_transform_avx);
+ }
+ static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
+                             unsigned int len, u8 *out)
+ {
+-      return sha1_finup(desc, data, len, out,
+-                      (sha1_transform_fn *) sha1_transform_avx);
++      return sha1_finup(desc, data, len, out, sha1_transform_avx);
+ }
+ static int sha1_avx_final(struct shash_desc *desc, u8 *out)
+@@ -196,8 +190,8 @@ static inline void unregister_sha1_avx(void) { }
+ #if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX)
+ #define SHA1_AVX2_BLOCK_OPTSIZE       4       /* optimal 4*64 bytes of SHA1 blocks */
+-asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
+-                                  unsigned int rounds);
++asmlinkage void sha1_transform_avx2(struct sha1_state *digest, const u8 *data,
++                                  int rounds);
+ static bool avx2_usable(void)
+ {
+@@ -209,8 +203,8 @@ static bool avx2_usable(void)
+       return false;
+ }
+-static void sha1_apply_transform_avx2(u32 *digest, const char *data,
+-                              unsigned int rounds)
++static void sha1_apply_transform_avx2(struct sha1_state *digest, const u8 *data,
++                              int rounds)
+ {
+       /* Select the optimal transform based on data block size */
+       if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE)
+@@ -222,15 +216,13 @@ static void sha1_apply_transform_avx2(u32 *digest, const char *data,
+ static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+ {
+-      return sha1_update(desc, data, len,
+-              (sha1_transform_fn *) sha1_apply_transform_avx2);
++      return sha1_update(desc, data, len, sha1_apply_transform_avx2);
+ }
+ static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
+                             unsigned int len, u8 *out)
+ {
+-      return sha1_finup(desc, data, len, out,
+-              (sha1_transform_fn *) sha1_apply_transform_avx2);
++      return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
+ }
+ static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
+@@ -274,21 +266,19 @@ static inline void unregister_sha1_avx2(void) { }
+ #endif
+ #ifdef CONFIG_AS_SHA1_NI
+-asmlinkage void sha1_ni_transform(u32 *digest, const char *data,
+-                                 unsigned int rounds);
++asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
++                                 int rounds);
+ static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+ {
+-      return sha1_update(desc, data, len,
+-              (sha1_transform_fn *) sha1_ni_transform);
++      return sha1_update(desc, data, len, sha1_ni_transform);
+ }
+ static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
+                             unsigned int len, u8 *out)
+ {
+-      return sha1_finup(desc, data, len, out,
+-              (sha1_transform_fn *) sha1_ni_transform);
++      return sha1_finup(desc, data, len, out, sha1_ni_transform);
+ }
+ static int sha1_ni_final(struct shash_desc *desc, u8 *out)
+diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
+index 92b3b5d..8732479 100644
+--- a/arch/x86/crypto/sha256-avx-asm.S
++++ b/arch/x86/crypto/sha256-avx-asm.S
+@@ -49,6 +49,7 @@
+ #ifdef CONFIG_AS_AVX
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## assume buffers not aligned
+ #define    VMOVDQ vmovdqu
+@@ -347,8 +348,7 @@ a = TMP_
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-ENTRY(sha256_transform_avx)
+-.align 32
++RAP_ENTRY(sha256_transform_avx)
+       pushq   %rbx
+       pushq   %rbp
+       pushq   %r13
+@@ -460,6 +460,7 @@ done_hash:
+       popq    %r13
+       popq    %rbp
+       popq    %rbx
++      pax_force_retaddr
+       ret
+ ENDPROC(sha256_transform_avx)
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 570ec5e..9bcfa25 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -50,6 +50,7 @@
+ #ifdef CONFIG_AS_AVX2
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## assume buffers not aligned
+ #define       VMOVDQ vmovdqu
+@@ -528,8 +529,7 @@ STACK_SIZE = _RSP      + _RSP_SIZE
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-ENTRY(sha256_transform_rorx)
+-.align 32
++RAP_ENTRY(sha256_transform_rorx)
+       pushq   %rbx
+       pushq   %rbp
+       pushq   %r12
+@@ -720,6 +720,7 @@ done_hash:
+       popq    %r12
+       popq    %rbp
+       popq    %rbx
++      pax_force_retaddr
+       ret
+ ENDPROC(sha256_transform_rorx)
+diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+index a78a069..127cb66 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -101,7 +101,7 @@ offset = \_offset
+ # JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
+ # arg 1 : rcx : state
+-ENTRY(sha256_mb_mgr_flush_avx2)
++RAP_ENTRY(sha256_mb_mgr_flush_avx2)
+       FRAME_BEGIN
+         push    %rbx
+@@ -225,7 +225,7 @@ ENDPROC(sha256_mb_mgr_flush_avx2)
+ ##############################################################################
+ .align 16
+-ENTRY(sha256_mb_mgr_get_comp_job_avx2)
++RAP_ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+       push    %rbx
+       ## if bit 32+3 is set, then all lanes are empty
+diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+index 7ea670e..5aa297a 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+@@ -96,7 +96,7 @@ lane_data    = %r10
+ # JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
+ # arg 1 : rcx : state
+ # arg 2 : rdx : job
+-ENTRY(sha256_mb_mgr_submit_avx2)
++RAP_ENTRY(sha256_mb_mgr_submit_avx2)
+       FRAME_BEGIN
+       push    %rbx
+       push    %r12
+diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
+index 2cedc44..6fb8582 100644
+--- a/arch/x86/crypto/sha256-ssse3-asm.S
++++ b/arch/x86/crypto/sha256-ssse3-asm.S
+@@ -47,6 +47,7 @@
+ ########################################################################
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ ## assume buffers not aligned
+ #define    MOVDQ movdqu
+@@ -352,9 +353,7 @@ a = TMP_
+ ## arg 2 : pointer to input data
+ ## arg 3 : Num blocks
+ ########################################################################
+-.text
+-ENTRY(sha256_transform_ssse3)
+-.align 32
++RAP_ENTRY(sha256_transform_ssse3)
+       pushq   %rbx
+       pushq   %rbp
+       pushq   %r13
+@@ -471,6 +470,7 @@ done_hash:
+       popq    %rbp
+       popq    %rbx
++      pax_force_retaddr
+       ret
+ ENDPROC(sha256_transform_ssse3)
+diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
+index 748cdf2..959bb4d 100644
+--- a/arch/x86/crypto/sha256_ni_asm.S
++++ b/arch/x86/crypto/sha256_ni_asm.S
+@@ -97,7 +97,7 @@
+ .text
+ .align 32
+-ENTRY(sha256_ni_transform)
++RAP_ENTRY(sha256_ni_transform)
+       shl             $6, NUM_BLKS            /*  convert to bytes */
+       jz              .Ldone_hash
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 9e79baf..c5186c74 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -40,9 +40,9 @@
+ #include <asm/fpu/api.h>
+ #include <linux/string.h>
+-asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
+-                                     u64 rounds);
+-typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds);
++asmlinkage void sha256_transform_ssse3(struct sha256_state *digest, const u8 *data,
++                                     int rounds);
++typedef void (sha256_transform_fn)(struct sha256_state *digest, const u8 *data, int rounds);
+ static int sha256_update(struct shash_desc *desc, const u8 *data,
+                        unsigned int len, sha256_transform_fn *sha256_xform)
+@@ -57,8 +57,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
+       BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+       kernel_fpu_begin();
+-      sha256_base_do_update(desc, data, len,
+-                            (sha256_block_fn *)sha256_xform);
++      sha256_base_do_update(desc, data, len, sha256_xform);
+       kernel_fpu_end();
+       return 0;
+@@ -72,9 +71,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
+       kernel_fpu_begin();
+       if (len)
+-              sha256_base_do_update(desc, data, len,
+-                                    (sha256_block_fn *)sha256_xform);
+-      sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform);
++              sha256_base_do_update(desc, data, len, sha256_xform);
++      sha256_base_do_finalize(desc, sha256_xform);
+       kernel_fpu_end();
+       return sha256_base_finish(desc, out);
+@@ -146,8 +144,8 @@ static void unregister_sha256_ssse3(void)
+ }
+ #ifdef CONFIG_AS_AVX
+-asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
+-                                   u64 rounds);
++asmlinkage void sha256_transform_avx(struct sha256_state *digest, const u8 *data,
++                                   int rounds);
+ static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
+                        unsigned int len)
+@@ -230,8 +228,8 @@ static inline void unregister_sha256_avx(void) { }
+ #endif
+ #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
+-asmlinkage void sha256_transform_rorx(u32 *digest, const char *data,
+-                                    u64 rounds);
++asmlinkage void sha256_transform_rorx(struct sha256_state *digest, const u8 *data,
++                                    int rounds);
+ static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
+                        unsigned int len)
+@@ -312,8 +310,8 @@ static inline void unregister_sha256_avx2(void) { }
+ #endif
+ #ifdef CONFIG_AS_SHA256_NI
+-asmlinkage void sha256_ni_transform(u32 *digest, const char *data,
+-                                 u64 rounds); /*unsigned int rounds);*/
++asmlinkage void sha256_ni_transform(struct sha256_state *digest, const u8 *data,
++                                 int rounds); /*unsigned int rounds);*/
+ static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
+                        unsigned int len)
+diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
+index 565274d..779d34a 100644
+--- a/arch/x86/crypto/sha512-avx-asm.S
++++ b/arch/x86/crypto/sha512-avx-asm.S
+@@ -49,6 +49,7 @@
+ #ifdef CONFIG_AS_AVX
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -277,7 +278,8 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
+ # message blocks.
+ # L is the message length in SHA512 blocks
+ ########################################################################
+-ENTRY(sha512_transform_avx)
++ALIGN
++RAP_ENTRY(sha512_transform_avx)
+       cmp $0, msglen
+       je nowork
+@@ -364,6 +366,7 @@ updateblock:
+       mov     frame_RSPSAVE(%rsp), %rsp
+ nowork:
++      pax_force_retaddr
+       ret
+ ENDPROC(sha512_transform_avx)
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index 1f20b35..ab1f3a8 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -51,6 +51,7 @@
+ #ifdef CONFIG_AS_AVX2
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -568,7 +569,8 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
+ #   message blocks.
+ # L is the message length in SHA512 blocks
+ ########################################################################
+-ENTRY(sha512_transform_rorx)
++ALIGN
++RAP_ENTRY(sha512_transform_rorx)
+       # Allocate Stack Space
+       mov     %rsp, %rax
+       sub     $frame_size, %rsp
+@@ -678,6 +680,7 @@ done_hash:
+       # Restore Stack Pointer
+       mov     frame_RSPSAVE(%rsp), %rsp
++      pax_force_retaddr
+       ret
+ ENDPROC(sha512_transform_rorx)
+diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+index 3ddba19..2d3abc7 100644
+--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+@@ -107,7 +107,7 @@ offset = \_offset
+ # JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
+ # arg 1 : rcx : state
+-ENTRY(sha512_mb_mgr_flush_avx2)
++RAP_ENTRY(sha512_mb_mgr_flush_avx2)
+       FRAME_BEGIN
+       push    %rbx
+@@ -220,7 +220,7 @@ return_null:
+ ENDPROC(sha512_mb_mgr_flush_avx2)
+ .align 16
+-ENTRY(sha512_mb_mgr_get_comp_job_avx2)
++RAP_ENTRY(sha512_mb_mgr_get_comp_job_avx2)
+         push    %rbx
+       mov     _unused_lanes(state), unused_lanes
+diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+index 815f07b..70fbc7b 100644
+--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
++++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+@@ -98,7 +98,7 @@
+ # JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
+ # arg 1 : rcx : state
+ # arg 2 : rdx : job
+-ENTRY(sha512_mb_mgr_submit_avx2)
++RAP_ENTRY(sha512_mb_mgr_submit_avx2)
+       FRAME_BEGIN
+       push    %rbx
+       push    %r12
+diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
+index e610e29..83f1cde 100644
+--- a/arch/x86/crypto/sha512-ssse3-asm.S
++++ b/arch/x86/crypto/sha512-ssse3-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -275,7 +276,8 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
+ #   message blocks.
+ # L is the message length in SHA512 blocks.
+ ########################################################################
+-ENTRY(sha512_transform_ssse3)
++ALIGN
++RAP_ENTRY(sha512_transform_ssse3)
+       cmp $0, msglen
+       je nowork
+@@ -363,6 +365,7 @@ updateblock:
+       mov     frame_RSPSAVE(%rsp), %rsp
+ nowork:
++      pax_force_retaddr
+       ret
+ ENDPROC(sha512_transform_ssse3)
+diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
+index 2b0e2a6..59a1f94 100644
+--- a/arch/x86/crypto/sha512_ssse3_glue.c
++++ b/arch/x86/crypto/sha512_ssse3_glue.c
+@@ -39,10 +39,10 @@
+ #include <linux/string.h>
+-asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
+-                                     u64 rounds);
++asmlinkage void sha512_transform_ssse3(struct sha512_state *digest, const u8 *data,
++                                     int rounds);
+-typedef void (sha512_transform_fn)(u64 *digest, const char *data, u64 rounds);
++typedef void (sha512_transform_fn)(struct sha512_state *digest, const u8 *data, int rounds);
+ static int sha512_update(struct shash_desc *desc, const u8 *data,
+                      unsigned int len, sha512_transform_fn *sha512_xform)
+@@ -57,8 +57,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
+       BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
+       kernel_fpu_begin();
+-      sha512_base_do_update(desc, data, len,
+-                            (sha512_block_fn *)sha512_xform);
++      sha512_base_do_update(desc, data, len, sha512_xform);
+       kernel_fpu_end();
+       return 0;
+@@ -72,9 +71,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
+       kernel_fpu_begin();
+       if (len)
+-              sha512_base_do_update(desc, data, len,
+-                                    (sha512_block_fn *)sha512_xform);
+-      sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform);
++              sha512_base_do_update(desc, data, len, sha512_xform);
++      sha512_base_do_finalize(desc, sha512_xform);
+       kernel_fpu_end();
+       return sha512_base_finish(desc, out);
+@@ -146,8 +144,8 @@ static void unregister_sha512_ssse3(void)
+ }
+ #ifdef CONFIG_AS_AVX
+-asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
+-                                   u64 rounds);
++asmlinkage void sha512_transform_avx(struct sha512_state *digest, const u8 *data,
++                                   int rounds);
+ static bool avx_usable(void)
+ {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+@@ -229,8 +227,8 @@ static inline void unregister_sha512_avx(void) { }
+ #endif
+ #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
+-asmlinkage void sha512_transform_rorx(u64 *digest, const char *data,
+-                                    u64 rounds);
++asmlinkage void sha512_transform_rorx(struct sha512_state *digest, const u8 *data,
++                                    int rounds);
+ static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
+                      unsigned int len)
+diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+index dc66273..30aba4b 100644
+--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+ .file "twofish-avx-x86_64-asm_64.S"
+@@ -285,6 +286,7 @@ __twofish_enc_blk8:
+       outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
+       outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__twofish_enc_blk8)
+@@ -325,10 +327,11 @@ __twofish_dec_blk8:
+       outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
+       outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
++      pax_force_retaddr
+       ret;
+ ENDPROC(__twofish_dec_blk8)
+-ENTRY(twofish_ecb_enc_8way)
++RAP_ENTRY(twofish_ecb_enc_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -345,10 +348,11 @@ ENTRY(twofish_ecb_enc_8way)
+       store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_ecb_enc_8way)
+-ENTRY(twofish_ecb_dec_8way)
++RAP_ENTRY(twofish_ecb_dec_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -365,10 +369,11 @@ ENTRY(twofish_ecb_dec_8way)
+       store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_ecb_dec_8way)
+-ENTRY(twofish_cbc_dec_8way)
++RAP_ENTRY(twofish_cbc_dec_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -376,24 +381,25 @@ ENTRY(twofish_cbc_dec_8way)
+        */
+       FRAME_BEGIN
+-      pushq %r12;
++      pushq %r14;
+       movq %rsi, %r11;
+-      movq %rdx, %r12;
++      movq %rdx, %r14;
+       load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+       call __twofish_dec_blk8;
+-      store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++      store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+-      popq %r12;
++      popq %r14;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_cbc_dec_8way)
+-ENTRY(twofish_ctr_8way)
++RAP_ENTRY(twofish_ctr_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -402,25 +408,26 @@ ENTRY(twofish_ctr_8way)
+        */
+       FRAME_BEGIN
+-      pushq %r12;
++      pushq %r14;
+       movq %rsi, %r11;
+-      movq %rdx, %r12;
++      movq %rdx, %r14;
+       load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+                     RD2, RX0, RX1, RY0);
+       call __twofish_enc_blk8;
+-      store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
++      store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+-      popq %r12;
++      popq %r14;
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_ctr_8way)
+-ENTRY(twofish_xts_enc_8way)
++RAP_ENTRY(twofish_xts_enc_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -441,10 +448,11 @@ ENTRY(twofish_xts_enc_8way)
+       store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_xts_enc_8way)
+-ENTRY(twofish_xts_dec_8way)
++RAP_ENTRY(twofish_xts_dec_8way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -465,5 +473,6 @@ ENTRY(twofish_xts_dec_8way)
+       store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       FRAME_END
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_xts_dec_8way)
+diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S
+index 694ea45..f2c1418 100644
+--- a/arch/x86/crypto/twofish-i586-asm_32.S
++++ b/arch/x86/crypto/twofish-i586-asm_32.S
+@@ -220,7 +220,7 @@
+       xor     %esi,           d ## D;\
+       ror     $1,             d ## D;
+-ENTRY(twofish_enc_blk)
++RAP_ENTRY(twofish_enc_blk)
+       push    %ebp                    /* save registers according to calling convention*/
+       push    %ebx
+       push    %esi
+@@ -276,7 +276,7 @@ ENTRY(twofish_enc_blk)
+       ret
+ ENDPROC(twofish_enc_blk)
+-ENTRY(twofish_dec_blk)
++RAP_ENTRY(twofish_dec_blk)
+       push    %ebp                    /* save registers according to calling convention*/
+       push    %ebx
+       push    %esi
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+index 1c3b7ce..c9912c7 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -21,6 +21,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ .file "twofish-x86_64-asm-3way.S"
+ .text
+@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
+       popq %r13;
+       popq %r14;
+       popq %r15;
++      pax_force_retaddr
+       ret;
+ .L__enc_xor3:
+@@ -269,10 +271,11 @@ ENTRY(__twofish_enc_blk_3way)
+       popq %r13;
+       popq %r14;
+       popq %r15;
++      pax_force_retaddr
+       ret;
+ ENDPROC(__twofish_enc_blk_3way)
+-ENTRY(twofish_dec_blk_3way)
++RAP_ENTRY(twofish_dec_blk_3way)
+       /* input:
+        *      %rdi: ctx, CTX
+        *      %rsi: dst
+@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
+       popq %r13;
+       popq %r14;
+       popq %r15;
++      pax_force_retaddr
+       ret;
+ ENDPROC(twofish_dec_blk_3way)
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
+index a350c99..080c5ab 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
+@@ -22,6 +22,7 @@
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+ #define a_offset      0
+ #define b_offset      4
+@@ -215,7 +216,7 @@
+       xor     %r8d,           d ## D;\
+       ror     $1,             d ## D;
+-ENTRY(twofish_enc_blk)
++RAP_ENTRY(twofish_enc_blk)
+       pushq    R1
+       /* %rdi contains the ctx address */
+@@ -265,10 +266,11 @@ ENTRY(twofish_enc_blk)
+       popq    R1
+       movl    $1,%eax
++      pax_force_retaddr
+       ret
+ ENDPROC(twofish_enc_blk)
+-ENTRY(twofish_dec_blk)
++RAP_ENTRY(twofish_dec_blk)
+       pushq    R1
+       /* %rdi contains the ctx address */
+@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
+       popq    R1
+       movl    $1,%eax
++      pax_force_retaddr
+       ret
+ ENDPROC(twofish_dec_blk)
+diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
+index b7a3904b..3e4d0d6 100644
+--- a/arch/x86/crypto/twofish_avx_glue.c
++++ b/arch/x86/crypto/twofish_avx_glue.c
+@@ -46,24 +46,25 @@
+ #define TWOFISH_PARALLEL_BLOCKS 8
+ /* 8-way parallel cipher functions */
+-asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_ecb_enc_8way(void *ctx, u8 *dst,
+                                    const u8 *src);
+-asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_ecb_dec_8way(void *ctx, u8 *dst,
+                                    const u8 *src);
+-asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_cbc_dec_8way(void *ctx, u8 *dst,
+                                    const u8 *src);
+-asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
+-                               const u8 *src, le128 *iv);
++asmlinkage void twofish_ctr_8way(void *ctx, u128 *dst,
++                               const u128 *src, le128 *iv);
+-asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
+-                                   const u8 *src, le128 *iv);
+-asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
+-                                   const u8 *src, le128 *iv);
++asmlinkage void twofish_xts_enc_8way(void *ctx, u128 *dst,
++                                   const u128 *src, le128 *iv);
++asmlinkage void twofish_xts_dec_8way(void *ctx, u128 *dst,
++                                   const u128 *src, le128 *iv);
+-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
++static inline void twofish_enc_blk_3way(void *_ctx, u8 *dst,
+                                       const u8 *src)
+ {
++      struct twofish_ctx *ctx = _ctx;
+       __twofish_enc_blk_3way(ctx, dst, src, false);
+ }
+diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
+index 77e06c2..a45c27b 100644
+--- a/arch/x86/crypto/twofish_glue.c
++++ b/arch/x86/crypto/twofish_glue.c
+@@ -44,10 +44,10 @@
+ #include <linux/module.h>
+ #include <linux/types.h>
+-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_enc_blk(void *ctx, u8 *dst,
+                               const u8 *src);
+ EXPORT_SYMBOL_GPL(twofish_enc_blk);
+-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_dec_blk(void *ctx, u8 *dst,
+                               const u8 *src);
+ EXPORT_SYMBOL_GPL(twofish_dec_blk);
+diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
+index 2ebb5e9..a0b0aa9 100644
+--- a/arch/x86/crypto/twofish_glue_3way.c
++++ b/arch/x86/crypto/twofish_glue_3way.c
+@@ -36,21 +36,21 @@
+ EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
+ EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
+-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
++static inline void twofish_enc_blk_3way(void *ctx, u8 *dst,
+                                       const u8 *src)
+ {
+       __twofish_enc_blk_3way(ctx, dst, src, false);
+ }
+-static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
++static inline void twofish_enc_blk_xor_3way(void *ctx, u8 *dst,
+                                           const u8 *src)
+ {
+       __twofish_enc_blk_3way(ctx, dst, src, true);
+ }
+-void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
++void twofish_dec_blk_cbc_3way(void *ctx, u8 *_dst, const u8 *_src)
+ {
+-      u128 ivs[2];
++      u128 ivs[2], *dst = (u128 *)_dst, *src = (u128 *)_src;
+       ivs[0] = src[0];
+       ivs[1] = src[1];
+@@ -118,10 +118,10 @@ static const struct common_glue_ctx twofish_ctr = {
+       .funcs = { {
+               .num_blocks = 3,
+-              .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) }
++              .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
+       }, {
+               .num_blocks = 1,
+-              .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) }
++              .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
+       } }
+ };
+diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
+index 77f28ce..7714ca0 100644
+--- a/arch/x86/entry/Makefile
++++ b/arch/x86/entry/Makefile
+@@ -15,3 +15,5 @@ obj-y                                += vsyscall/
+ obj-$(CONFIG_IA32_EMULATION)  += entry_64_compat.o syscall_32.o
++CFLAGS_REMOVE_syscall_32.o = $(RAP_PLUGIN_ABS_CFLAGS)
++CFLAGS_REMOVE_syscall_64.o = $(RAP_PLUGIN_ABS_CFLAGS)
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index 9a9e588..b900d1c 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -95,23 +95,26 @@ For 32-bit we have the following conventions - kernel is built with
+       .endm
+       .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      movq %r12, R12+\offset(%rsp)
++#endif
+       .if \r11
+-      movq %r11, 6*8+\offset(%rsp)
++      movq %r11, R11+\offset(%rsp)
+       .endif
+       .if \r8910
+-      movq %r10, 7*8+\offset(%rsp)
+-      movq %r9,  8*8+\offset(%rsp)
+-      movq %r8,  9*8+\offset(%rsp)
++      movq %r10, R10+\offset(%rsp)
++      movq %r9,  R9+\offset(%rsp)
++      movq %r8,  R8+\offset(%rsp)
+       .endif
+       .if \rax
+-      movq %rax, 10*8+\offset(%rsp)
++      movq %rax, RAX+\offset(%rsp)
+       .endif
+       .if \rcx
+-      movq %rcx, 11*8+\offset(%rsp)
++      movq %rcx, RCX+\offset(%rsp)
+       .endif
+-      movq %rdx, 12*8+\offset(%rsp)
+-      movq %rsi, 13*8+\offset(%rsp)
+-      movq %rdi, 14*8+\offset(%rsp)
++      movq %rdx, RDX+\offset(%rsp)
++      movq %rsi, RSI+\offset(%rsp)
++      movq %rdi, RDI+\offset(%rsp)
+       .endm
+       .macro SAVE_C_REGS offset=0
+       SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+@@ -130,67 +133,78 @@ For 32-bit we have the following conventions - kernel is built with
+       .endm
+       .macro SAVE_EXTRA_REGS offset=0
+-      movq %r15, 0*8+\offset(%rsp)
+-      movq %r14, 1*8+\offset(%rsp)
+-      movq %r13, 2*8+\offset(%rsp)
+-      movq %r12, 3*8+\offset(%rsp)
+-      movq %rbp, 4*8+\offset(%rsp)
+-      movq %rbx, 5*8+\offset(%rsp)
++      movq %r15, R15+\offset(%rsp)
++      movq %r14, R14+\offset(%rsp)
++      movq %r13, R13+\offset(%rsp)
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      movq %r12, R12+\offset(%rsp)
++#endif
++      movq %rbp, RBP+\offset(%rsp)
++      movq %rbx, RBX+\offset(%rsp)
+       .endm
+       .macro RESTORE_EXTRA_REGS offset=0
+-      movq 0*8+\offset(%rsp), %r15
+-      movq 1*8+\offset(%rsp), %r14
+-      movq 2*8+\offset(%rsp), %r13
+-      movq 3*8+\offset(%rsp), %r12
+-      movq 4*8+\offset(%rsp), %rbp
+-      movq 5*8+\offset(%rsp), %rbx
++      movq R15+\offset(%rsp), %r15
++      movq R14+\offset(%rsp), %r14
++      movq R13+\offset(%rsp), %r13
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      movq R12+\offset(%rsp), %r12
++#endif
++      movq RBP+\offset(%rsp), %rbp
++      movq RBX+\offset(%rsp), %rbx
+       .endm
+       .macro ZERO_EXTRA_REGS
+       xorl    %r15d, %r15d
+       xorl    %r14d, %r14d
+       xorl    %r13d, %r13d
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       xorl    %r12d, %r12d
++#endif
+       xorl    %ebp, %ebp
+       xorl    %ebx, %ebx
+       .endm
+-      .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
++      .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      .if \rstor_r12
++      movq R12(%rsp), %r12
++      .endif
++#endif
+       .if \rstor_r11
+-      movq 6*8(%rsp), %r11
++      movq R11(%rsp), %r11
+       .endif
+       .if \rstor_r8910
+-      movq 7*8(%rsp), %r10
+-      movq 8*8(%rsp), %r9
+-      movq 9*8(%rsp), %r8
++      movq R10(%rsp), %r10
++      movq R9(%rsp), %r9
++      movq R8(%rsp), %r8
+       .endif
+       .if \rstor_rax
+-      movq 10*8(%rsp), %rax
++      movq RAX(%rsp), %rax
+       .endif
+       .if \rstor_rcx
+-      movq 11*8(%rsp), %rcx
++      movq RCX(%rsp), %rcx
+       .endif
+       .if \rstor_rdx
+-      movq 12*8(%rsp), %rdx
++      movq RDX(%rsp), %rdx
+       .endif
+-      movq 13*8(%rsp), %rsi
+-      movq 14*8(%rsp), %rdi
++      movq RSI(%rsp), %rsi
++      movq RDI(%rsp), %rdi
+       .endm
+       .macro RESTORE_C_REGS
+-      RESTORE_C_REGS_HELPER 1,1,1,1,1
++      RESTORE_C_REGS_HELPER 1,1,1,1,1,1
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_RAX
+-      RESTORE_C_REGS_HELPER 0,1,1,1,1
++      RESTORE_C_REGS_HELPER 0,1,1,1,1,0
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_RCX
+-      RESTORE_C_REGS_HELPER 1,0,1,1,1
++      RESTORE_C_REGS_HELPER 1,0,1,1,1,0
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_R11
+-      RESTORE_C_REGS_HELPER 1,1,0,1,1
++      RESTORE_C_REGS_HELPER 1,1,0,1,1,1
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+-      RESTORE_C_REGS_HELPER 1,0,0,1,1
++      RESTORE_C_REGS_HELPER 1,0,0,1,1,1
+       .endm
+       .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 1433f6b..dac4cbe 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -33,9 +33,7 @@
+ static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
+ {
+-      unsigned long top_of_stack =
+-              (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
+-      return (struct thread_info *)(top_of_stack - THREAD_SIZE);
++      return current_thread_info();
+ }
+ #ifdef CONFIG_CONTEXT_TRACKING
+@@ -49,6 +47,12 @@ __visible inline void enter_from_user_mode(void)
+ static inline void enter_from_user_mode(void) {}
+ #endif
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++asmlinkage void pax_erase_kstack(void);
++#else
++static void pax_erase_kstack(void) {}
++#endif
++
+ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
+ {
+ #ifdef CONFIG_X86_64
+@@ -63,6 +67,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
+       }
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+  * Returns the syscall nr to run (which should match regs->orig_ax) or -1
+  * to skip the syscall.
+@@ -81,12 +89,19 @@ static long syscall_trace_enter(struct pt_regs *regs)
+       work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       if (unlikely(work & _TIF_SYSCALL_EMU))
+               emulated = true;
+       if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
+-          tracehook_report_syscall_entry(regs))
++          tracehook_report_syscall_entry(regs)) {
++              pax_erase_kstack();
+               return -1L;
++      }
+       if (emulated)
+               return -1L;
+@@ -121,8 +136,10 @@ static long syscall_trace_enter(struct pt_regs *regs)
+               }
+               ret = __secure_computing(&sd);
+-              if (ret == -1)
++              if (ret == -1) {
++                      pax_erase_kstack();
+                       return ret;
++              }
+       }
+ #endif
+@@ -131,6 +148,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+       do_audit_syscall_entry(regs, arch);
++      pax_erase_kstack();
+       return ret ?: regs->orig_ax;
+ }
+@@ -237,7 +255,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
+       step = unlikely(
+               (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
+               == _TIF_SINGLESTEP);
+-      if (step || cached_flags & _TIF_SYSCALL_TRACE)
++      if (step || (cached_flags & _TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(regs, step);
+ }
+@@ -256,6 +274,11 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
+           WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
+               local_irq_enable();
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++              gr_delayed_cred_worker();
++#endif
++
+       /*
+        * First do one-time work.  If these work items are enabled, we
+        * want to run them exactly once per syscall exit with IRQs on.
+@@ -285,9 +308,29 @@ __visible void do_syscall_64(struct pt_regs *regs)
+        * regs->orig_ax, which changes the behavior of some syscalls.
+        */
+       if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
++#ifdef CONFIG_PAX_RAP
++              asm volatile("movq %[param1],%%rdi\n\t"
++                           "movq %[param2],%%rsi\n\t"
++                           "movq %[param3],%%rdx\n\t"
++                           "movq %[param4],%%rcx\n\t"
++                           "movq %[param5],%%r8\n\t"
++                           "movq %[param6],%%r9\n\t"
++                           "call *%P[syscall]\n\t"
++                           "mov %%rax,%[result]\n\t"
++                      : [result] "=m" (regs->ax)
++                      : [syscall] "m" (sys_call_table[nr & __SYSCALL_MASK]),
++                        [param1] "m" (regs->di),
++                        [param2] "m" (regs->si),
++                        [param3] "m" (regs->dx),
++                        [param4] "m" (regs->r10),
++                        [param5] "m" (regs->r8),
++                        [param6] "m" (regs->r9)
++                      : "ax", "di", "si", "dx", "cx", "r8", "r9", "r10", "r11", "memory");
++#else
+               regs->ax = sys_call_table[nr & __SYSCALL_MASK](
+                       regs->di, regs->si, regs->dx,
+                       regs->r10, regs->r8, regs->r9);
++#endif
+       }
+       syscall_return_slowpath(regs);
+@@ -327,10 +370,51 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
+                * the high bits are zero.  Make sure we zero-extend all
+                * of the args.
+                */
++#ifdef CONFIG_PAX_RAP
++#ifdef CONFIG_X86_64
++              asm volatile("movl %[param1],%%edi\n\t"
++                           "movl %[param2],%%esi\n\t"
++                           "movl %[param3],%%edx\n\t"
++                           "movl %[param4],%%ecx\n\t"
++                           "movl %[param5],%%r8d\n\t"
++                           "movl %[param6],%%r9d\n\t"
++                           "call *%P[syscall]\n\t"
++                           "mov %%rax,%[result]\n\t"
++                      : [result] "=m" (regs->ax)
++                      : [syscall] "m" (ia32_sys_call_table[nr]),
++                        [param1] "m" (regs->bx),
++                        [param2] "m" (regs->cx),
++                        [param3] "m" (regs->dx),
++                        [param4] "m" (regs->si),
++                        [param5] "m" (regs->di),
++                        [param6] "m" (regs->bp)
++                      : "ax", "di", "si", "dx", "cx", "r8", "r9", "r10", "r11", "memory");
++#else
++              asm volatile("pushl %[param6]\n\t"
++                           "pushl %[param5]\n\t"
++                           "pushl %[param4]\n\t"
++                           "pushl %[param3]\n\t"
++                           "pushl %[param2]\n\t"
++                           "pushl %[param1]\n\t"
++                           "call *%P[syscall]\n\t"
++                           "addl $6*8,%%esp\n\t"
++                           "mov %%eax,%[result]\n\t"
++                      : [result] "=m" (regs->ax)
++                      : [syscall] "m" (ia32_sys_call_table[nr]),
++                        [param1] "m" (regs->bx),
++                        [param2] "m" (regs->cx),
++                        [param3] "m" (regs->dx),
++                        [param4] "m" (regs->si),
++                        [param5] "m" (regs->di),
++                        [param6] "m" (regs->bp)
++                      : "ax", "dx", "cx", "memory");
++#endif
++#else
+               regs->ax = ia32_sys_call_table[nr](
+                       (unsigned int)regs->bx, (unsigned int)regs->cx,
+                       (unsigned int)regs->dx, (unsigned int)regs->si,
+                       (unsigned int)regs->di, (unsigned int)regs->bp);
++#endif
+       }
+       syscall_return_slowpath(regs);
+@@ -354,6 +438,7 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
+       unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
+               vdso_image_32.sym_int80_landing_pad;
++      u32 __user *saved_bp = (u32 __force_user *)(unsigned long)(u32)regs->sp;
+       /*
+        * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
+@@ -373,11 +458,9 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
+                * Micro-optimization: the pointer we're following is explicitly
+                * 32 bits, so it can't be out of range.
+                */
+-              __get_user(*(u32 *)&regs->bp,
+-                          (u32 __user __force *)(unsigned long)(u32)regs->sp)
++              __get_user_nocheck(*(u32 *)&regs->bp, saved_bp, sizeof(u32))
+ #else
+-              get_user(*(u32 *)&regs->bp,
+-                       (u32 __user __force *)(unsigned long)(u32)regs->sp)
++              get_user(regs->bp, saved_bp)
+ #endif
+               ) {
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 0b56666..92043f9 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -147,13 +147,157 @@
+       movl    \reg, PT_GS(%esp)
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+       movl    $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++      movl $(__USER_DS), \reg
++#else
++      xorl \reg, \reg
++#endif
++
+       movl    \reg, %gs
+ .endm
+ #endif /* CONFIG_X86_32_LAZY_GS */
+-.macro SAVE_ALL pt_regs_ax=%eax
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++      call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++      call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++      pushl %eax
++      pushl %ecx
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++      mov %eax, %esi
++#else
++      mov %cr0, %esi
++#endif
++      bts $X86_CR0_WP_BIT, %esi
++      jnc 1f
++      mov %cs, %esi
++      cmp $__KERNEL_CS, %esi
++      jz 3f
++      ljmp $__KERNEL_CS, $3f
++1:    ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++      mov %esi, %eax
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++      mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++      popl %ecx
++      popl %eax
++#endif
++      ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++      pushl %eax
++      pushl %ecx
++#endif
++      mov %cs, %esi
++      cmp $__KERNEXEC_KERNEL_CS, %esi
++      jnz 2f
++#ifdef CONFIG_PARAVIRT
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++      mov %eax, %esi
++#else
++      mov %cr0, %esi
++#endif
++      btr $X86_CR0_WP_BIT, %esi
++      ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++      mov %esi, %eax
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++      mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++      popl %ecx
++      popl %eax
++#endif
++      ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++      .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++      call pax_erase_kstack
++#endif
++      .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ */
++ENTRY(pax_erase_kstack)
++      pushl %edi
++      pushl %ecx
++      pushl %eax
++      pushl %ebp
++
++      GET_THREAD_INFO(%ebp)
++      mov TI_lowest_stack(%ebp), %edi
++      mov $-0xBEEF, %eax
++      std
++
++1:    mov %edi, %ecx
++      and $THREAD_SIZE_asm - 1, %ecx
++      shr $2, %ecx
++      repne scasl
++      jecxz 2f
++
++      cmp $2*16, %ecx
++      jc 2f
++
++      mov $2*16, %ecx
++      repe scasl
++      jecxz 2f
++      jne 1b
++
++2:    cld
++      or $2*4, %edi
++      mov %esp, %ecx
++      sub %edi, %ecx
++
++      cmp $THREAD_SIZE_asm, %ecx
++      jb 3f
++      ud2
++3:
++
++      shr $2, %ecx
++      rep stosl
++
++      mov TI_task_thread_sp0(%ebp), %edi
++      sub $128, %edi
++      mov %edi, TI_lowest_stack(%ebp)
++
++      popl %ebp
++      popl %eax
++      popl %ecx
++      popl %edi
++      ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL pt_regs_ax, _DS
+       cld
+       PUSH_GS
+       pushl   %fs
+@@ -166,7 +310,7 @@
+       pushl   %edx
+       pushl   %ecx
+       pushl   %ebx
+-      movl    $(__USER_DS), %edx
++      movl    $\_DS, %edx
+       movl    %edx, %ds
+       movl    %edx, %es
+       movl    $(__KERNEL_PERCPU), %edx
+@@ -174,6 +318,15 @@
+       SET_KERNEL_GS %edx
+ .endm
++.macro SAVE_ALL pt_regs_ax=%eax
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      __SAVE_ALL \pt_regs_ax, __KERNEL_DS
++      pax_enter_kernel
++#else
++      __SAVE_ALL \pt_regs_ax, __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+       popl    %ebx
+       popl    %ecx
+@@ -213,7 +366,7 @@ ENTRY(ret_from_fork)
+       movl    %esp, %eax
+       call    syscall_return_slowpath
+       jmp     restore_all
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+ ENTRY(ret_from_kernel_thread)
+       pushl   %eax
+@@ -257,15 +410,23 @@ ret_from_intr:
+       andl    $SEGMENT_RPL_MASK, %eax
+ #endif
+       cmpl    $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++      jae     resume_userspace
++
++      pax_exit_kernel
++      jmp     resume_kernel
++#else
+       jb      resume_kernel                   # not returning to v8086 or userspace
++#endif
+ ENTRY(resume_userspace)
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       TRACE_IRQS_OFF
+       movl    %esp, %eax
+       call    prepare_exit_to_usermode
+-      jmp     restore_all
+-END(ret_from_exception)
++      jmp     .Lsyscall_32_done
++ENDPROC(ret_from_exception)
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -277,7 +438,7 @@ need_resched:
+       jz      restore_all
+       call    preempt_schedule_irq
+       jmp     need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+ GLOBAL(__begin_SYSENTER_singlestep_region)
+@@ -344,6 +505,10 @@ sysenter_past_esp:
+       pushl   %eax                    /* pt_regs->orig_ax */
+       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * SYSENTER doesn't filter flags, so we need to clear NT, AC
+        * and TF ourselves.  To save a few cycles, we can check whether
+@@ -379,11 +544,20 @@ sysenter_past_esp:
+       ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
+                   "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
++#ifdef CONFIG_PAX_RANDKSTACK
++      movl    %esp, %eax
++      call    pax_randomize_kstack
++#endif
++
++      pax_erase_kstack
++
+ /* Opportunistic SYSEXIT */
+       TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
+       movl    PT_EIP(%esp), %edx      /* pt_regs->ip */
+       movl    PT_OLDESP(%esp), %ecx   /* pt_regs->sp */
+ 1:    mov     PT_FS(%esp), %fs
++2:    mov     PT_DS(%esp), %ds
++3:    mov     PT_ES(%esp), %es
+       PTGS_TO_GS
+       popl    %ebx                    /* pt_regs->bx */
+       addl    $2*4, %esp              /* skip pt_regs->cx and pt_regs->dx */
+@@ -409,10 +583,16 @@ sysenter_past_esp:
+       sysexit
+ .pushsection .fixup, "ax"
+-2:    movl    $0, PT_FS(%esp)
++4:    movl    $0, PT_FS(%esp)
++      jmp     1b
++5:    movl    $0, PT_DS(%esp)
++      jmp     1b
++6:    movl    $0, PT_ES(%esp)
+       jmp     1b
+ .popsection
+-      _ASM_EXTABLE(1b, 2b)
++      _ASM_EXTABLE(1b, 4b)
++      _ASM_EXTABLE(2b, 5b)
++      _ASM_EXTABLE(3b, 6b)
+       PTGS_TO_GS_EX
+ .Lsysenter_fix_flags:
+@@ -455,6 +635,10 @@ ENTRY(entry_INT80_32)
+       pushl   %eax                    /* pt_regs->orig_ax */
+       SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * User mode is traced as though IRQs are on, and the interrupt gate
+        * turned them off.
+@@ -465,6 +649,13 @@ ENTRY(entry_INT80_32)
+       call    do_int80_syscall_32
+ .Lsyscall_32_done:
++#ifdef CONFIG_PAX_RANDKSTACK
++      movl    %esp, %eax
++      call    pax_randomize_kstack
++#endif
++
++      pax_erase_kstack
++
+ restore_all:
+       TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -508,14 +699,34 @@ ldt_ss:
+  * compensating for the offset by changing to the ESPFIX segment with
+  * a base address that matches for the difference.
+  */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+       mov     %esp, %edx                      /* load kernel esp */
+       mov     PT_OLDESP(%esp), %eax           /* load userspace esp */
+       mov     %dx, %ax                        /* eax: new kernel esp */
+       sub     %eax, %edx                      /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++      movl    PER_CPU_VAR(cpu_number), %ebx
++      shll    $PAGE_SHIFT_asm, %ebx
++      addl    $cpu_gdt_table, %ebx
++#else
++      movl    $cpu_gdt_table, %ebx
++#endif
+       shr     $16, %edx
+-      mov     %dl, GDT_ESPFIX_SS + 4          /* bits 16..23 */
+-      mov     %dh, GDT_ESPFIX_SS + 7          /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov     %cr0, %esi
++      btr     $X86_CR0_WP_BIT, %esi
++      mov     %esi, %cr0
++#endif
++
++      mov     %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++      mov     %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      bts     $X86_CR0_WP_BIT, %esi
++      mov     %esi, %cr0
++#endif
++
+       pushl   $__ESPFIX_SS
+       pushl   %eax                            /* new kernel esp */
+       /*
+@@ -539,8 +750,15 @@ ENDPROC(entry_INT80_32)
+  */
+ #ifdef CONFIG_X86_ESPFIX32
+       /* fixup the stack */
+-      mov     GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+-      mov     GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++      movl    PER_CPU_VAR(cpu_number), %ebx
++      shll    $PAGE_SHIFT_asm, %ebx
++      addl    $cpu_gdt_table, %ebx
++#else
++      movl    $cpu_gdt_table, %ebx
++#endif
++      mov     4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++      mov     7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+       shl     $16, %eax
+       addl    %esp, %eax                      /* the adjusted stack pointer */
+       pushl   $__KERNEL_DS
+@@ -576,7 +794,7 @@ ENTRY(irq_entries_start)
+       jmp     common_interrupt
+       .align  8
+     .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+ /*
+  * the CPU automatically disables interrupts when executing an IRQ vector,
+@@ -623,7 +841,7 @@ ENTRY(coprocessor_error)
+       pushl   $0
+       pushl   $do_coprocessor_error
+       jmp     error_code
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+ ENTRY(simd_coprocessor_error)
+       ASM_CLAC
+@@ -637,20 +855,20 @@ ENTRY(simd_coprocessor_error)
+       pushl   $do_simd_coprocessor_error
+ #endif
+       jmp     error_code
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+ ENTRY(device_not_available)
+       ASM_CLAC
+       pushl   $-1                             # mark this as an int
+       pushl   $do_device_not_available
+       jmp     error_code
+-END(device_not_available)
++ENDPROC(device_not_available)
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+       iret
+       _ASM_EXTABLE(native_iret, iret_exc)
+-END(native_iret)
++ENDPROC(native_iret)
+ #endif
+ ENTRY(overflow)
+@@ -658,59 +876,59 @@ ENTRY(overflow)
+       pushl   $0
+       pushl   $do_overflow
+       jmp     error_code
+-END(overflow)
++ENDPROC(overflow)
+ ENTRY(bounds)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_bounds
+       jmp     error_code
+-END(bounds)
++ENDPROC(bounds)
+ ENTRY(invalid_op)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_invalid_op
+       jmp     error_code
+-END(invalid_op)
++ENDPROC(invalid_op)
+ ENTRY(coprocessor_segment_overrun)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_coprocessor_segment_overrun
+       jmp     error_code
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+ ENTRY(invalid_TSS)
+       ASM_CLAC
+       pushl   $do_invalid_TSS
+       jmp     error_code
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+ ENTRY(segment_not_present)
+       ASM_CLAC
+       pushl   $do_segment_not_present
+       jmp     error_code
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+ ENTRY(stack_segment)
+       ASM_CLAC
+       pushl   $do_stack_segment
+       jmp     error_code
+-END(stack_segment)
++ENDPROC(stack_segment)
+ ENTRY(alignment_check)
+       ASM_CLAC
+       pushl   $do_alignment_check
+       jmp     error_code
+-END(alignment_check)
++ENDPROC(alignment_check)
+ ENTRY(divide_error)
+       ASM_CLAC
+       pushl   $0                              # no error code
+       pushl   $do_divide_error
+       jmp     error_code
+-END(divide_error)
++ENDPROC(divide_error)
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -718,7 +936,7 @@ ENTRY(machine_check)
+       pushl   $0
+       pushl   machine_check_vector
+       jmp     error_code
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+ ENTRY(spurious_interrupt_bug)
+@@ -726,7 +944,16 @@ ENTRY(spurious_interrupt_bug)
+       pushl   $0
+       pushl   $do_spurious_interrupt_bug
+       jmp     error_code
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ENTRY(refcount_error)
++      ASM_CLAC
++      pushl   $0
++      pushl   $do_refcount_error
++      jmp     error_code
++ENDPROC(refcount_error)
++#endif
+ #ifdef CONFIG_XEN
+ ENTRY(xen_hypervisor_callback)
+@@ -825,7 +1052,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+ ENTRY(mcount)
+       ret
+-END(mcount)
++ENDPROC(mcount)
+ ENTRY(ftrace_caller)
+       pushl   %eax
+@@ -855,7 +1082,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+       ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+ ENTRY(ftrace_regs_caller)
+       pushf   /* push flags before compare (in cs location) */
+@@ -953,7 +1180,7 @@ trace:
+       popl    %ecx
+       popl    %eax
+       jmp     ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+@@ -971,7 +1198,7 @@ ENTRY(ftrace_graph_caller)
+       popl    %ecx
+       popl    %eax
+       ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+ .globl return_to_handler
+ return_to_handler:
+@@ -990,7 +1217,7 @@ ENTRY(trace_page_fault)
+       ASM_CLAC
+       pushl   $trace_do_page_fault
+       jmp     error_code
+-END(trace_page_fault)
++ENDPROC(trace_page_fault)
+ #endif
+ ENTRY(page_fault)
+@@ -1019,16 +1246,19 @@ error_code:
+       movl    $-1, PT_ORIG_EAX(%esp)          # no syscall to restart
+       REG_TO_PTGS %ecx
+       SET_KERNEL_GS %ecx
+-      movl    $(__USER_DS), %ecx
++      movl    $(__KERNEL_DS), %ecx
+       movl    %ecx, %ds
+       movl    %ecx, %es
++
++      pax_enter_kernel
++
+       TRACE_IRQS_OFF
+       movl    %esp, %eax                      # pt_regs pointer
+       call    *%edi
+       jmp     ret_from_exception
+-END(page_fault)
++ENDPROC(page_fault)
+-ENTRY(debug)
++ENTRY(int1)
+       /*
+        * #DB can happen at the first instruction of
+        * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
+@@ -1045,7 +1275,13 @@ ENTRY(debug)
+       movl    %esp, %eax                      # pt_regs pointer
+       /* Are we currently on the SYSENTER stack? */
+-      PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
++#ifdef CONFIG_SMP
++      imul    $TSS_size, PER_CPU_VAR(cpu_number), %ecx
++      lea     cpu_tss(%ecx), %ecx
++#else
++      movl    $cpu_tss, %ecx
++#endif
++      movl    CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack(%ecx), %ecx
+       subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
+       cmpl    $SIZEOF_SYSENTER_stack, %ecx
+       jb      .Ldebug_from_sysenter_stack
+@@ -1062,7 +1298,7 @@ ENTRY(debug)
+       call    do_debug
+       movl    %ebp, %esp
+       jmp     ret_from_exception
+-END(debug)
++ENDPROC(int1)
+ /*
+  * NMI is doubly nasty.  It can happen on the first instruction of
+@@ -1087,13 +1323,22 @@ ENTRY(nmi)
+       movl    %esp, %eax                      # pt_regs pointer
+       /* Are we currently on the SYSENTER stack? */
+-      PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
++#ifdef CONFIG_SMP
++      imul    $TSS_size, PER_CPU_VAR(cpu_number), %ecx
++      lea     cpu_tss(%ecx), %ecx
++#else
++      movl    $cpu_tss, %ecx
++#endif
++      movl    CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack(%ecx), %ecx
+       subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
+       cmpl    $SIZEOF_SYSENTER_stack, %ecx
+       jb      .Lnmi_from_sysenter_stack
+       /* Not on SYSENTER stack. */
+       call    do_nmi
++
++      pax_exit_kernel
++
+       jmp     restore_all_notrace
+ .Lnmi_from_sysenter_stack:
+@@ -1105,6 +1350,9 @@ ENTRY(nmi)
+       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
+       call    do_nmi
+       movl    %ebp, %esp
++
++      pax_exit_kernel
++
+       jmp     restore_all_notrace
+ #ifdef CONFIG_X86_ESPFIX32
+@@ -1124,11 +1372,14 @@ nmi_espfix_stack:
+       FIXUP_ESPFIX_STACK                      # %eax == %esp
+       xorl    %edx, %edx                      # zero error code
+       call    do_nmi
++
++      pax_exit_kernel
++
+       RESTORE_REGS
+       lss     12+4(%esp), %esp                # back to espfix stack
+       jmp     irq_return
+ #endif
+-END(nmi)
++ENDPROC(nmi)
+ ENTRY(int3)
+       ASM_CLAC
+@@ -1139,19 +1390,19 @@ ENTRY(int3)
+       movl    %esp, %eax                      # pt_regs pointer
+       call    do_int3
+       jmp     ret_from_exception
+-END(int3)
++ENDPROC(int3)
+ ENTRY(general_protection)
+       pushl   $do_general_protection
+       jmp     error_code
+-END(general_protection)
++ENDPROC(general_protection)
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+       ASM_CLAC
+       pushl   $do_async_page_fault
+       jmp     error_code
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+ ENTRY(rewind_stack_do_exit)
+@@ -1161,6 +1412,6 @@ ENTRY(rewind_stack_do_exit)
+       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esi
+       leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
+-      call    do_exit
++      call    do_group_exit
+ 1:    jmp 1b
+-END(rewind_stack_do_exit)
++ENDPROC(rewind_stack_do_exit)
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 02fff3e..c6685ec 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -36,6 +36,8 @@
+ #include <asm/smap.h>
+ #include <asm/pgtable_types.h>
+ #include <linux/err.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+ #include <linux/elf-em.h>
+@@ -53,6 +55,395 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
++      .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++      .byte 0x48; ljmp *1234f(%rip)
++      .pushsection .rodata
++      .align 16
++      1234: .quad \off; .word \sel
++      .popsection
++#else
++      pushq   $\sel
++      pushq   $\off
++      lretq
++#endif
++      .endm
++
++      .macro pax_enter_kernel
++      pax_set_fptr_mask
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      call    pax_enter_kernel
++#endif
++      .endm
++
++      .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      call    pax_exit_kernel
++#endif
++      .endm
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ENTRY(pax_enter_kernel)
++      pushq   %rdi
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      bts     $X86_CR0_WP_BIT,%rdi
++      jnc     3f
++      mov     %cs,%edi
++      cmp     $__KERNEL_CS,%edi
++      jnz     2f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++      GET_CR3_INTO_RDI
++      cmp     $0,%dil
++      jnz     112f
++      mov     $__KERNEL_DS,%edi
++      mov     %edi,%ss
++      jmp     111f
++112:  cmp     $1,%dil
++      jz      113f
++      ud2
++113:  sub     $4097,%rdi
++      bts     $63,%rdi
++      SET_RDI_INTO_CR3
++      mov     $__UDEREF_KERNEL_DS,%edi
++      mov     %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++      popq    %rdi
++      pax_force_retaddr
++      retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2:    ljmpq   __KERNEL_CS,1b
++3:    ljmpq   __KERNEXEC_KERNEL_CS,4f
++4:    SET_RDI_INTO_CR0
++      jmp     1b
++#endif
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++      pushq   %rdi
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov     %cs,%rdi
++      cmp     $__KERNEXEC_KERNEL_CS,%edi
++      jz      2f
++      GET_CR0_INTO_RDI
++      bts     $X86_CR0_WP_BIT,%rdi
++      jnc     4f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++      mov     %ss,%edi
++      cmp     $__UDEREF_KERNEL_DS,%edi
++      jnz     111f
++      GET_CR3_INTO_RDI
++      cmp     $0,%dil
++      jz      112f
++      ud2
++112:  add     $4097,%rdi
++      bts     $63,%rdi
++      SET_RDI_INTO_CR3
++      mov     $__KERNEL_DS,%edi
++      mov     %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++      popq    %rdi
++      pax_force_retaddr
++      retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2:    GET_CR0_INTO_RDI
++      btr     $X86_CR0_WP_BIT,%rdi
++      jnc     4f
++      ljmpq   __KERNEL_CS,3f
++3:    SET_RDI_INTO_CR0
++      jmp     1b
++4:    ud2
++      jmp     4b
++#endif
++ENDPROC(pax_exit_kernel)
++#endif
++
++      .macro pax_enter_kernel_user
++      pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call    pax_enter_kernel_user
++#endif
++      .endm
++
++      .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call    pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++      pushq   %rax
++      pushq   %r11
++      call    pax_randomize_kstack
++      popq    %r11
++      popq    %rax
++#endif
++      .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++      pushq   %rdi
++      pushq   %rbx
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++      ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++      GET_CR3_INTO_RDI
++      cmp     $1,%dil
++      jnz     4f
++      sub     $4097,%rdi
++      bts     $63,%rdi
++      SET_RDI_INTO_CR3
++      jmp     3f
++111:
++
++      GET_CR3_INTO_RDI
++      mov     %rdi,%rbx
++      add     $__START_KERNEL_map,%rbx
++      sub     phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++      pushq %rdi
++      i = 0
++      .rept USER_PGD_PTRS
++      mov     i*8(%rbx),%rsi
++      mov     $0,%sil
++      lea     i*8(%rbx),%rdi
++      call    PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++      i = i + 1
++      .endr
++      popq    %rdi
++#else
++      i = 0
++      .rept USER_PGD_PTRS
++      movb    $0,i*8(%rbx)
++      i = i + 1
++      .endr
++#endif
++
++      SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      bts     $X86_CR0_WP_BIT,%rdi
++      SET_RDI_INTO_CR0
++#endif
++
++3:
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++      popq    %rbx
++      popq    %rdi
++      pax_force_retaddr
++      retq
++4:    ud2
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++      pushq   %rdi
++      pushq   %rbx
++
++#ifdef CONFIG_PARAVIRT
++      PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++      GET_CR3_INTO_RDI
++      ALTERNATIVE "jmp 1f", "", X86_FEATURE_PCID
++      cmp     $0,%dil
++      jnz     3f
++      add     $4097,%rdi
++      bts     $63,%rdi
++      SET_RDI_INTO_CR3
++      jmp     2f
++1:
++
++      mov     %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      btr     $X86_CR0_WP_BIT,%rdi
++      jnc     3f
++      SET_RDI_INTO_CR0
++#endif
++
++      add     $__START_KERNEL_map,%rbx
++      sub     phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++      i = 0
++      .rept USER_PGD_PTRS
++      mov     i*8(%rbx),%rsi
++      mov     $0x67,%sil
++      lea     i*8(%rbx),%rdi
++      call    PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++      i = i + 1
++      .endr
++#else
++      i = 0
++      .rept USER_PGD_PTRS
++      movb    $0x67,i*8(%rbx)
++      i = i + 1
++      .endr
++#endif
++
++2:
++
++#ifdef CONFIG_PARAVIRT
++      PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++      popq    %rbx
++      popq    %rdi
++      pax_force_retaddr
++      retq
++3:    ud2
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++      .macro pax_enter_kernel_nmi
++      pax_set_fptr_mask
++
++#ifdef CONFIG_PAX_KERNEXEC
++      GET_CR0_INTO_RDI
++      bts     $X86_CR0_WP_BIT,%rdi
++      jc      110f
++      SET_RDI_INTO_CR0
++      or      $2,%ebx
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++      GET_CR3_INTO_RDI
++      cmp     $0,%dil
++      jz      111f
++      sub     $4097,%rdi
++      or      $4,%ebx
++      bts     $63,%rdi
++      SET_RDI_INTO_CR3
++      mov     $__UDEREF_KERNEL_DS,%edi
++      mov     %edi,%ss
++111:
++#endif
++      .endm
++
++      .macro pax_exit_kernel_nmi
++#ifdef CONFIG_PAX_KERNEXEC
++      btr     $1,%ebx
++      jnc     110f
++      GET_CR0_INTO_RDI
++      btr     $X86_CR0_WP_BIT,%rdi
++      SET_RDI_INTO_CR0
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++      btr     $2,%ebx
++      jnc     111f
++      GET_CR3_INTO_RDI
++      add     $4097,%rdi
++      bts     $63,%rdi
++      SET_RDI_INTO_CR3
++      mov     $__KERNEL_DS,%edi
++      mov     %edi,%ss
++111:
++#endif
++      .endm
++
++      .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++      call    pax_erase_kstack
++#endif
++      .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ENTRY(pax_erase_kstack)
++      pushq   %rdi
++      pushq   %rcx
++      pushq   %rax
++      pushq   %r11
++
++      GET_THREAD_INFO(%r11)
++      mov     TI_lowest_stack(%r11), %rdi
++      mov     $-0xBEEF, %rax
++      std
++
++1:    mov     %edi, %ecx
++      and     $THREAD_SIZE_asm - 1, %ecx
++      shr     $3, %ecx
++      repne   scasq
++      jecxz   2f
++
++      cmp     $2*8, %ecx
++      jc      2f
++
++      mov     $2*8, %ecx
++      repe    scasq
++      jecxz   2f
++      jne     1b
++
++2:    cld
++      or      $2*8, %rdi
++      mov     %esp, %ecx
++      sub     %edi, %ecx
++
++      cmp     $THREAD_SIZE_asm, %rcx
++      jb      3f
++      ud2
++3:
++
++      shr     $3, %ecx
++      rep     stosq
++
++      mov     TI_task_thread_sp0(%r11), %rdi
++      sub     $256, %rdi
++      mov     %rdi, TI_lowest_stack(%r11)
++
++      popq    %r11
++      popq    %rax
++      popq    %rcx
++      popq    %rdi
++      pax_force_retaddr
++      ret
++ENDPROC(pax_erase_kstack)
++#endif
++
+ .macro TRACE_IRQS_IRETQ
+ #ifdef CONFIG_TRACE_IRQFLAGS
+       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+@@ -88,7 +479,7 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+ .macro TRACE_IRQS_IRETQ_DEBUG
+-      bt      $9, EFLAGS(%rsp)                /* interrupts off? */
++      bt      $X86_EFLAGS_IF_BIT, EFLAGS(%rsp)        /* interrupts off? */
+       jnc     1f
+       TRACE_IRQS_ON_DEBUG
+ 1:
+@@ -175,11 +566,22 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+       pushq   %r11                            /* pt_regs->r11 */
+       sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      movq    %r12, R12(%rsp)
++#endif
++
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * If we need to do entry work or if we guess we'll need to do
+        * exit work, go straight to the slow path.
+        */
+-      testl   $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++      GET_THREAD_INFO(%rcx)
++      testl   $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TI_flags(%rcx)
+       jnz     entry_SYSCALL64_slow_path
+ entry_SYSCALL_64_fastpath:
+@@ -217,9 +619,13 @@ entry_SYSCALL_64_fastpath:
+        */
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+-      testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++      GET_THREAD_INFO(%rcx)
++      testl   $_TIF_ALLWORK_MASK, TI_flags(%rcx)
+       jnz     1f
++      pax_exit_kernel_user
++      pax_erase_kstack
++
+       LOCKDEP_SYS_EXIT
+       TRACE_IRQS_ON           /* user mode is traced as IRQs on */
+       movq    RIP(%rsp), %rcx
+@@ -248,6 +654,9 @@ entry_SYSCALL64_slow_path:
+       call    do_syscall_64           /* returns with IRQs disabled */
+ return_from_SYSCALL_64:
++      pax_exit_kernel_user
++      pax_erase_kstack
++
+       RESTORE_EXTRA_REGS
+       TRACE_IRQS_IRETQ                /* we're about to change IF */
+@@ -272,13 +681,12 @@ return_from_SYSCALL_64:
+       .error "virtual address width changed -- SYSRET checks need update"
+       .endif
+-      /* Change top 16 bits to be the sign-extension of 47th bit */
+-      shl     $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+-      sar     $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+-
+-      /* If this changed %rcx, it was not canonical */
+-      cmpq    %rcx, %r11
+-      jne     opportunistic_sysret_failed
++      /*
++       * If the top 17 bits are not 0 then RIP isn't a userland address,
++       * it may not even be canonical, fall back to iret
++       */
++      shr     $(__VIRTUAL_MASK_SHIFT), %r11
++      jnz     opportunistic_sysret_failed
+       cmpq    $__USER_CS, CS(%rsp)            /* CS must match SYSRET */
+       jne     opportunistic_sysret_failed
+@@ -326,7 +734,7 @@ syscall_return_via_sysret:
+ opportunistic_sysret_failed:
+       SWAPGS
+       jmp     restore_c_regs_and_iret
+-END(entry_SYSCALL_64)
++ENDPROC(entry_SYSCALL_64)
+ ENTRY(stub_ptregs_64)
+       /*
+@@ -353,13 +761,13 @@ ENTRY(stub_ptregs_64)
+ 1:
+       /* Called from C */
+       jmp     *%rax                           /* called from C */
+-END(stub_ptregs_64)
++ENDPROC(stub_ptregs_64)
+ .macro ptregs_stub func
+ ENTRY(ptregs_\func)
+       leaq    \func(%rip), %rax
+       jmp     stub_ptregs_64
+-END(ptregs_\func)
++ENDPROC(ptregs_\func)
+ .endm
+ /* Instantiate ptregs_stub for each ptregs-using syscall */
+@@ -401,10 +809,12 @@ ENTRY(ret_from_fork)
+ 1:
+       movq    %rsp, %rdi
+       call    syscall_return_slowpath /* returns with IRQs disabled */
++      pax_exit_kernel_user
++      pax_erase_kstack
+       TRACE_IRQS_ON                   /* user mode is traced as IRQS on */
+       SWAPGS
+       jmp     restore_regs_and_iret
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+ /*
+  * Build the entry stubs with some assembler magic.
+@@ -419,7 +829,7 @@ ENTRY(irq_entries_start)
+       jmp     common_interrupt
+       .align  8
+     .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+ /*
+  * Interrupt entry/exit.
+@@ -445,6 +855,12 @@ END(irq_entries_start)
+        */
+       SWAPGS
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_enter_kernel_user
++#else
++      pax_enter_kernel
++#endif
++
+       /*
+        * We need to tell lockdep that IRQs are off.  We can't do this until
+        * we fix gsbase, and we should do it before enter_from_user_mode
+@@ -457,7 +873,9 @@ END(irq_entries_start)
+       CALL_enter_from_user_mode
+-1:
++      jmp     2f
++1:    pax_enter_kernel
++2:
+       /*
+        * Save previous stack pointer, optionally switch to interrupt stack.
+        * irq_count is used to check if a CPU is already on an interrupt stack
+@@ -469,6 +887,7 @@ END(irq_entries_start)
+       incl    PER_CPU_VAR(irq_count)
+       cmovzq  PER_CPU_VAR(irq_stack_ptr), %rsp
+       pushq   %rdi
++
+       /* We entered an interrupt context - irqs are off: */
+       TRACE_IRQS_OFF
+@@ -500,6 +919,8 @@ ret_from_intr:
+ GLOBAL(retint_user)
+       mov     %rsp,%rdi
+       call    prepare_exit_to_usermode
++      pax_exit_kernel_user
++#     pax_erase_kstack
+       TRACE_IRQS_IRETQ
+       SWAPGS
+       jmp     restore_regs_and_iret
+@@ -517,6 +938,21 @@ retint_kernel:
+       jmp     0b
+ 1:
+ #endif
++
++      pax_exit_kernel
++
++#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC_PLUGIN)
++      /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
++       * namely calling EFI runtime services with a phys mapping. We're
++       * starting off with NOPs and patch in the real instrumentation
++       * (BTS/OR) before starting any userland process; even before starting
++       * up the APs.
++       */
++      ALTERNATIVE "", "pax_force_retaddr 16*8", X86_FEATURE_ALWAYS
++#else
++      pax_force_retaddr RIP
++#endif
++
+       /*
+        * The iretq could re-enable interrupts:
+        */
+@@ -560,15 +996,15 @@ native_irq_return_ldt:
+       SWAPGS
+       movq    PER_CPU_VAR(espfix_waddr), %rdi
+       movq    %rax, (0*8)(%rdi)               /* RAX */
+-      movq    (2*8)(%rsp), %rax               /* RIP */
++      movq    (2*8 + RIP-RIP)(%rsp), %rax     /* RIP */
+       movq    %rax, (1*8)(%rdi)
+-      movq    (3*8)(%rsp), %rax               /* CS */
++      movq    (2*8 + CS-RIP)(%rsp), %rax      /* CS */
+       movq    %rax, (2*8)(%rdi)
+-      movq    (4*8)(%rsp), %rax               /* RFLAGS */
++      movq    (2*8 + EFLAGS-RIP)(%rsp), %rax  /* RFLAGS */
+       movq    %rax, (3*8)(%rdi)
+-      movq    (6*8)(%rsp), %rax               /* SS */
++      movq    (2*8 + SS-RIP)(%rsp), %rax      /* SS */
+       movq    %rax, (5*8)(%rdi)
+-      movq    (5*8)(%rsp), %rax               /* RSP */
++      movq    (2*8 + RSP-RIP)(%rsp), %rax     /* RSP */
+       movq    %rax, (4*8)(%rdi)
+       andl    $0xffff0000, %eax
+       popq    %rdi
+@@ -578,7 +1014,7 @@ native_irq_return_ldt:
+       popq    %rax
+       jmp     native_irq_return_iret
+ #endif
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+ /*
+  * APIC interrupts.
+@@ -590,7 +1026,7 @@ ENTRY(\sym)
+ .Lcommon_\sym:
+       interrupt \do_sym
+       jmp     ret_from_intr
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+ #ifdef CONFIG_TRACING
+@@ -666,7 +1102,7 @@ apicinterrupt IRQ_WORK_VECTOR                     irq_work_interrupt              smp_irq_work_interrupt
+ /*
+  * Exception entry points.
+  */
+-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
++#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
+ .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+ ENTRY(\sym)
+@@ -713,6 +1149,12 @@ ENTRY(\sym)
+       .endif
+       .if \shift_ist != -1
++#ifdef CONFIG_SMP
++      imul    $TSS_size, PER_CPU_VAR(cpu_number), %r13d
++      leaq    cpu_tss(%r13), %r13
++#else
++      leaq    cpu_tss(%rip), %r13
++#endif
+       subq    $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+       .endif
+@@ -756,7 +1198,7 @@ ENTRY(\sym)
+       jmp     error_exit                      /* %ebx: no swapgs flag */
+       .endif
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+ #ifdef CONFIG_TRACING
+@@ -784,6 +1226,9 @@ idtentry coprocessor_error                do_coprocessor_error            has_error_code=0
+ idtentry alignment_check              do_alignment_check              has_error_code=1
+ idtentry simd_coprocessor_error               do_simd_coprocessor_error       has_error_code=0
++#ifdef CONFIG_PAX_REFCOUNT
++idtentry refcount_error                       do_refcount_error               has_error_code=0
++#endif
+       /*
+        * Reload gs selector with exception handling
+@@ -798,8 +1243,9 @@ ENTRY(native_load_gs_index)
+ 2:    ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
+       SWAPGS
+       popfq
++      pax_force_retaddr
+       ret
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+       _ASM_EXTABLE(.Lgs_change, bad_gs)
+       .section .fixup, "ax"
+@@ -827,8 +1273,9 @@ ENTRY(do_softirq_own_stack)
+       call    __do_softirq
+       leaveq
+       decl    PER_CPU_VAR(irq_count)
++      pax_force_retaddr
+       ret
+-END(do_softirq_own_stack)
++ENDPROC(do_softirq_own_stack)
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+@@ -864,7 +1311,7 @@ ENTRY(xen_do_hypervisor_callback)         /* do_hypervisor_callback(struct *pt_regs) */
+       call    xen_maybe_preempt_hcall
+ #endif
+       jmp     error_exit
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+ /*
+  * Hypervisor uses this for application faults while it executes.
+@@ -909,7 +1356,7 @@ ENTRY(xen_failsafe_callback)
+       SAVE_C_REGS
+       SAVE_EXTRA_REGS
+       jmp     error_exit
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+       xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -921,7 +1368,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+       hyperv_callback_vector hyperv_vector_handler
+ #endif /* CONFIG_HYPERV */
+-idtentry debug                        do_debug                has_error_code=0        paranoid=1 shift_ist=DEBUG_STACK
++idtentry int1                 do_debug                has_error_code=0        paranoid=1 shift_ist=DEBUG_STACK
+ idtentry int3                 do_int3                 has_error_code=0        paranoid=1 shift_ist=DEBUG_STACK
+ idtentry stack_segment                do_stack_segment        has_error_code=1
+@@ -958,8 +1405,34 @@ ENTRY(paranoid_entry)
+       js      1f                              /* negative -> in kernel */
+       SWAPGS
+       xorl    %ebx, %ebx
+-1:    ret
+-END(paranoid_entry)
++1:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      testb   $3, CS+8(%rsp)
++      jz      1f
++      pax_enter_kernel_user
++      jmp     2f
++#endif
++1:    pax_enter_kernel
++2:
++      pax_force_retaddr
++      ret
++ENDPROC(paranoid_entry)
++
++ENTRY(paranoid_entry_nmi)
++      cld
++      SAVE_C_REGS 8
++      SAVE_EXTRA_REGS 8
++      movl    $1, %ebx
++      movl    $MSR_GS_BASE, %ecx
++      rdmsr
++      testl   %edx, %edx
++      js      1f      /* negative -> in kernel */
++      SWAPGS
++      xorl    %ebx, %ebx
++1:    pax_enter_kernel_nmi
++      pax_force_retaddr
++      ret
++ENDPROC(paranoid_entry_nmi)
+ /*
+  * "Paranoid" exit path from exception stack.  This is invoked
+@@ -976,19 +1449,26 @@ END(paranoid_entry)
+ ENTRY(paranoid_exit)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF_DEBUG
+-      testl   %ebx, %ebx                      /* swapgs needed? */
++      testl   $1, %ebx                        /* swapgs needed? */
+       jnz     paranoid_exit_no_swapgs
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_exit_kernel_user
++#else
++      pax_exit_kernel
++#endif
+       TRACE_IRQS_IRETQ
+       SWAPGS_UNSAFE_STACK
+       jmp     paranoid_exit_restore
+ paranoid_exit_no_swapgs:
++      pax_exit_kernel
+       TRACE_IRQS_IRETQ_DEBUG
+ paranoid_exit_restore:
+       RESTORE_EXTRA_REGS
+       RESTORE_C_REGS
+       REMOVE_PT_GPREGS_FROM_STACK 8
++      pax_force_retaddr_bts
+       INTERRUPT_RETURN
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+ /*
+  * Save all registers in pt_regs, and switch gs if needed.
+@@ -1008,6 +1488,12 @@ ENTRY(error_entry)
+        */
+       SWAPGS
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_enter_kernel_user
++#else
++      pax_enter_kernel
++#endif
++
+ .Lerror_entry_from_usermode_after_swapgs:
+       /*
+        * We need to tell lockdep that IRQs are off.  We can't do this until
+@@ -1016,10 +1502,12 @@ ENTRY(error_entry)
+        */
+       TRACE_IRQS_OFF
+       CALL_enter_from_user_mode
++      pax_force_retaddr
+       ret
+ .Lerror_entry_done:
+       TRACE_IRQS_OFF
++      pax_force_retaddr
+       ret
+       /*
+@@ -1037,7 +1525,7 @@ ENTRY(error_entry)
+       cmpq    %rax, RIP+8(%rsp)
+       je      .Lbstep_iret
+       cmpq    $.Lgs_change, RIP+8(%rsp)
+-      jne     .Lerror_entry_done
++      jne     1f
+       /*
+        * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
+@@ -1045,7 +1533,8 @@ ENTRY(error_entry)
+        * .Lgs_change's error handler with kernel gsbase.
+        */
+       SWAPGS
+-      jmp .Lerror_entry_done
++1:    pax_enter_kernel
++      jmp     .Lerror_entry_done
+ .Lbstep_iret:
+       /* Fix truncated RIP */
+@@ -1059,6 +1548,12 @@ ENTRY(error_entry)
+        */
+       SWAPGS
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pax_enter_kernel_user
++#else
++      pax_enter_kernel
++#endif
++
+       /*
+        * Pretend that the exception came from user mode: set up pt_regs
+        * as if we faulted immediately after IRET and clear EBX so that
+@@ -1069,11 +1564,11 @@ ENTRY(error_entry)
+       mov     %rax, %rsp
+       decl    %ebx
+       jmp     .Lerror_entry_from_usermode_after_swapgs
+-END(error_entry)
++ENDPROC(error_entry)
+ /*
+- * On entry, EBS is a "return to kernel mode" flag:
++ * On entry, EBX is a "return to kernel mode" flag:
+  *   1: already in kernel mode, don't need SWAPGS
+  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
+  */
+@@ -1081,10 +1576,10 @@ ENTRY(error_exit)
+       movl    %ebx, %eax
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+-      testl   %eax, %eax
++      testl   $1, %eax
+       jnz     retint_kernel
+       jmp     retint_user
+-END(error_exit)
++ENDPROC(error_exit)
+ /* Runs on exception stack */
+ ENTRY(nmi)
+@@ -1138,6 +1633,8 @@ ENTRY(nmi)
+        * other IST entries.
+        */
++      ASM_CLAC
++
+       /* Use %rdx as our temp variable throughout */
+       pushq   %rdx
+@@ -1181,6 +1678,12 @@ ENTRY(nmi)
+       pushq   %r14            /* pt_regs->r14 */
+       pushq   %r15            /* pt_regs->r15 */
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      xorl    %ebx, %ebx
++#endif
++
++      pax_enter_kernel_nmi
++
+       /*
+        * At this point we no longer need to worry about stack damage
+        * due to nesting -- we're on the normal thread stack and we're
+@@ -1191,12 +1694,19 @@ ENTRY(nmi)
+       movq    $-1, %rsi
+       call    do_nmi
++      pax_exit_kernel_nmi
++
+       /*
+        * Return back to user mode.  We must *not* do the normal exit
+        * work, because we don't want to enable interrupts.  Fortunately,
+        * do_nmi doesn't modify pt_regs.
+        */
+       SWAPGS
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      movq    RBX(%rsp), %rbx
++#endif
++
+       jmp     restore_c_regs_and_iret
+ .Lnmi_from_kernel:
+@@ -1318,6 +1828,7 @@ nested_nmi_out:
+       popq    %rdx
+       /* We are returning to kernel mode, so this cannot result in a fault. */
++#     pax_force_retaddr_bts
+       INTERRUPT_RETURN
+ first_nmi:
+@@ -1346,7 +1857,7 @@ first_nmi:
+       pushq   %rsp            /* RSP (minus 8 because of the previous push) */
+       addq    $8, (%rsp)      /* Fix up RSP */
+       pushfq                  /* RFLAGS */
+-      pushq   $__KERNEL_CS    /* CS */
++      pushq   4*8(%rsp)       /* CS */
+       pushq   $1f             /* RIP */
+       INTERRUPT_RETURN        /* continues at repeat_nmi below */
+ 1:
+@@ -1391,20 +1902,22 @@ end_repeat_nmi:
+       ALLOC_PT_GPREGS_ON_STACK
+       /*
+-       * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
++       * Use paranoid_entry_nmi to handle SWAPGS, but no need to use paranoid_exit
+        * as we should not be calling schedule in NMI context.
+        * Even with normal interrupts enabled. An NMI should not be
+        * setting NEED_RESCHED or anything that normal interrupts and
+        * exceptions might do.
+        */
+-      call    paranoid_entry
++      call    paranoid_entry_nmi
+       /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+       movq    %rsp, %rdi
+       movq    $-1, %rsi
+       call    do_nmi
+-      testl   %ebx, %ebx                      /* swapgs needed? */
++      pax_exit_kernel_nmi
++
++      testl   $1, %ebx                        /* swapgs needed? */
+       jnz     nmi_restore
+ nmi_swapgs:
+       SWAPGS_UNSAFE_STACK
+@@ -1415,6 +1928,8 @@ nmi_restore:
+       /* Point RSP at the "iret" frame. */
+       REMOVE_PT_GPREGS_FROM_STACK 6*8
++      pax_force_retaddr_bts
++
+       /*
+        * Clear "NMI executing".  Set DF first so that we can easily
+        * distinguish the remaining code between here and IRET from
+@@ -1432,12 +1947,12 @@ nmi_restore:
+        * mode, so this cannot result in a fault.
+        */
+       INTERRUPT_RETURN
+-END(nmi)
++ENDPROC(nmi)
+ ENTRY(ignore_sysret)
+       mov     $-ENOSYS, %eax
+       sysret
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+ ENTRY(rewind_stack_do_exit)
+       /* Prevent any naive code from trying to unwind to our caller. */
+@@ -1446,6 +1961,6 @@ ENTRY(rewind_stack_do_exit)
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rax
+       leaq    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+-      call    do_exit
++      call    do_group_exit
+ 1:    jmp 1b
+-END(rewind_stack_do_exit)
++ENDPROC(rewind_stack_do_exit)
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index e1721da..83f2c49 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -13,11 +13,39 @@
+ #include <asm/irqflags.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
+ #include <linux/err.h>
++#include <asm/alternative-asm.h>
+       .section .entry.text, "ax"
++      .macro pax_enter_kernel_user
++      pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call    pax_enter_kernel_user
++#endif
++      .endm
++
++      .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      call    pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++      pushq   %rax
++      pushq   %r11
++      call    pax_randomize_kstack
++      popq    %r11
++      popq    %rax
++#endif
++      .endm
++
++      .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++      call    pax_erase_kstack
++#endif
++      .endm
++
+ /*
+  * 32-bit SYSENTER entry.
+  *
+@@ -74,23 +102,34 @@ ENTRY(entry_SYSENTER_compat)
+       pushq   $__USER32_CS            /* pt_regs->cs */
+       pushq   $0                      /* pt_regs->ip = 0 (placeholder) */
+       pushq   %rax                    /* pt_regs->orig_ax */
++      xorl    %eax,%eax
+       pushq   %rdi                    /* pt_regs->di */
+       pushq   %rsi                    /* pt_regs->si */
+       pushq   %rdx                    /* pt_regs->dx */
+       pushq   %rcx                    /* pt_regs->cx */
+       pushq   $-ENOSYS                /* pt_regs->ax */
+-      pushq   $0                      /* pt_regs->r8  = 0 */
+-      pushq   $0                      /* pt_regs->r9  = 0 */
+-      pushq   $0                      /* pt_regs->r10 = 0 */
+-      pushq   $0                      /* pt_regs->r11 = 0 */
++      pushq   %rax                    /* pt_regs->r8  = 0 */
++      pushq   %rax                    /* pt_regs->r9  = 0 */
++      pushq   %rax                    /* pt_regs->r10 = 0 */
++      pushq   %rax                    /* pt_regs->r11 = 0 */
+       pushq   %rbx                    /* pt_regs->rbx */
+       pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
+-      pushq   $0                      /* pt_regs->r12 = 0 */
+-      pushq   $0                      /* pt_regs->r13 = 0 */
+-      pushq   $0                      /* pt_regs->r14 = 0 */
+-      pushq   $0                      /* pt_regs->r15 = 0 */
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      pushq   %r12                    /* pt_regs->r12 */
++#else
++      pushq   %rax                    /* pt_regs->r12 = 0 */
++#endif
++      pushq   %rax                    /* pt_regs->r13 = 0 */
++      pushq   %rax                    /* pt_regs->r14 = 0 */
++      pushq   %rax                    /* pt_regs->r15 = 0 */
+       cld
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * SYSENTER doesn't filter flags, so we need to clear NT and AC
+        * ourselves.  To save a few cycles, we can check whether
+@@ -204,16 +243,27 @@ ENTRY(entry_SYSCALL_compat)
+       pushq   %rdx                    /* pt_regs->dx */
+       pushq   %rbp                    /* pt_regs->cx (stashed in bp) */
+       pushq   $-ENOSYS                /* pt_regs->ax */
+-      pushq   $0                      /* pt_regs->r8  = 0 */
+-      pushq   $0                      /* pt_regs->r9  = 0 */
+-      pushq   $0                      /* pt_regs->r10 = 0 */
+-      pushq   $0                      /* pt_regs->r11 = 0 */
++      xorl    %eax,%eax
++      pushq   %rax                    /* pt_regs->r8  = 0 */
++      pushq   %rax                    /* pt_regs->r9  = 0 */
++      pushq   %rax                    /* pt_regs->r10 = 0 */
++      pushq   %rax                    /* pt_regs->r11 = 0 */
+       pushq   %rbx                    /* pt_regs->rbx */
+       pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
+-      pushq   $0                      /* pt_regs->r12 = 0 */
+-      pushq   $0                      /* pt_regs->r13 = 0 */
+-      pushq   $0                      /* pt_regs->r14 = 0 */
+-      pushq   $0                      /* pt_regs->r15 = 0 */
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      pushq   %r12                    /* pt_regs->r12 */
++#else
++      pushq   %rax                    /* pt_regs->r12 = 0 */
++#endif
++      pushq   %rax                    /* pt_regs->r13 = 0 */
++      pushq   %rax                    /* pt_regs->r14 = 0 */
++      pushq   %rax                    /* pt_regs->r15 = 0 */
++
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
+       /*
+        * User mode is traced as though IRQs are on, and SYSENTER
+@@ -229,11 +279,18 @@ ENTRY(entry_SYSCALL_compat)
+       /* Opportunistic SYSRET */
+ sysret32_from_system_call:
++      pax_exit_kernel_user
++      pax_erase_kstack
+       TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
+       movq    RBX(%rsp), %rbx         /* pt_regs->rbx */
+       movq    RBP(%rsp), %rbp         /* pt_regs->rbp */
+       movq    EFLAGS(%rsp), %r11      /* pt_regs->flags (in r11) */
+       movq    RIP(%rsp), %rcx         /* pt_regs->ip (in rcx) */
++
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      movq    R12(%rsp), %r12
++#endif
++
+       addq    $RAX, %rsp              /* Skip r8-r15 */
+       popq    %rax                    /* pt_regs->rax */
+       popq    %rdx                    /* Skip pt_regs->cx */
+@@ -262,7 +319,7 @@ sysret32_from_system_call:
+       movq    RSP-ORIG_RAX(%rsp), %rsp
+       swapgs
+       sysretl
+-END(entry_SYSCALL_compat)
++ENDPROC(entry_SYSCALL_compat)
+ /*
+  * 32-bit legacy system call entry.
+@@ -314,10 +371,11 @@ ENTRY(entry_INT80_compat)
+       pushq   %rdx                    /* pt_regs->dx */
+       pushq   %rcx                    /* pt_regs->cx */
+       pushq   $-ENOSYS                /* pt_regs->ax */
+-      pushq   $0                      /* pt_regs->r8  = 0 */
+-      pushq   $0                      /* pt_regs->r9  = 0 */
+-      pushq   $0                      /* pt_regs->r10 = 0 */
+-      pushq   $0                      /* pt_regs->r11 = 0 */
++      xorl    %eax,%eax
++      pushq   %rax                    /* pt_regs->r8  = 0 */
++      pushq   %rax                    /* pt_regs->r9  = 0 */
++      pushq   %rax                    /* pt_regs->r10 = 0 */
++      pushq   %rax                    /* pt_regs->r11 = 0 */
+       pushq   %rbx                    /* pt_regs->rbx */
+       pushq   %rbp                    /* pt_regs->rbp */
+       pushq   %r12                    /* pt_regs->r12 */
+@@ -326,6 +384,12 @@ ENTRY(entry_INT80_compat)
+       pushq   %r15                    /* pt_regs->r15 */
+       cld
++      pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++      pax_erase_kstack
++#endif
++
+       /*
+        * User mode is traced as though IRQs are on, and the interrupt
+        * gate turned them off.
+@@ -337,10 +401,12 @@ ENTRY(entry_INT80_compat)
+ .Lsyscall_32_done:
+       /* Go back to user mode. */
++      pax_exit_kernel_user
++      pax_erase_kstack
+       TRACE_IRQS_ON
+       SWAPGS
+       jmp     restore_regs_and_iret
+-END(entry_INT80_compat)
++ENDPROC(entry_INT80_compat)
+       ALIGN
+ GLOBAL(stub32_clone)
+diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
+index 627ecbc..6490d11 100644
+--- a/arch/x86/entry/thunk_64.S
++++ b/arch/x86/entry/thunk_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include "calling.h"
+ #include <asm/asm.h>
++#include <asm/alternative-asm.h>
+       /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+       .macro THUNK name, func, put_ret_addr_in_rdi=0
+@@ -65,6 +66,7 @@
+       popq %rsi
+       popq %rdi
+       popq %rbp
++      pax_force_retaddr
+       ret
+       _ASM_NOKPROBE(.L_restore)
+ #endif
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index d540966..443f0d7 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -170,7 +170,7 @@ quiet_cmd_vdso = VDSO    $@
+                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
+       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+ GCOV_PROFILE := n
+diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
+index 94d54d0..390dce1 100644
+--- a/arch/x86/entry/vdso/vclock_gettime.c
++++ b/arch/x86/entry/vdso/vclock_gettime.c
+@@ -300,5 +300,5 @@ notrace time_t __vdso_time(time_t *t)
+               *t = result;
+       return result;
+ }
+-int time(time_t *t)
++time_t time(time_t *t)
+       __attribute__((weak, alias("__vdso_time")));
+diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
+index 3dab75f..2c439d0 100644
+--- a/arch/x86/entry/vdso/vdso2c.h
++++ b/arch/x86/entry/vdso/vdso2c.h
+@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+       unsigned long load_size = -1;  /* Work around bogus warning */
+       unsigned long mapping_size;
+       ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+-      int i;
++      unsigned int i;
+       unsigned long j;
+       ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+               *alt_sec = NULL;
+@@ -89,7 +89,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+       for (i = 0;
+            i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
+            i++) {
+-              int k;
++              unsigned int k;
+               ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
+                       GET_LE(&symtab_hdr->sh_entsize) * i;
+               const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index f840766..222abb1 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -21,10 +21,7 @@
+ #include <asm/page.h>
+ #include <asm/desc.h>
+ #include <asm/cpufeature.h>
+-
+-#if defined(CONFIG_X86_64)
+-unsigned int __read_mostly vdso64_enabled = 1;
+-#endif
++#include <asm/mman.h>
+ void __init init_vdso_image(const struct vdso_image *image)
+ {
+@@ -90,7 +87,7 @@ static int vdso_fault(const struct vm_special_mapping *sm,
+ {
+       const struct vdso_image *image = vma->vm_mm->context.vdso_image;
+-      if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
++      if (!image || vmf->pgoff >= (image->size >> PAGE_SHIFT))
+               return VM_FAULT_SIGBUS;
+       vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
+@@ -128,7 +125,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
+               return -EFAULT;
+       vdso_fix_landing(image, new_vma);
+-      current->mm->context.vdso = (void __user *)new_vma->vm_start;
++      current->mm->context.vdso = new_vma->vm_start;
+       return 0;
+ }
+@@ -193,6 +190,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+               .fault = vvar_fault,
+       };
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              calculate_addr = false;
++#endif
++
+       if (calculate_addr) {
+               addr = vdso_addr(current->mm->start_stack,
+                                image->size - image->sym_vvar_start);
+@@ -204,15 +206,15 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+               return -EINTR;
+       addr = get_unmapped_area(NULL, addr,
+-                               image->size - image->sym_vvar_start, 0, 0);
++                               image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
+       if (IS_ERR_VALUE(addr)) {
+               ret = addr;
+               goto up_fail;
+       }
+       text_start = addr - image->sym_vvar_start;
+-      current->mm->context.vdso = (void __user *)text_start;
+-      current->mm->context.vdso_image = image;
++      mm->context.vdso = text_start;
++      mm->context.vdso_image = image;
+       /*
+        * MAYWRITE to allow gdb to COW and set breakpoints
+@@ -236,14 +238,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+                                      VM_PFNMAP,
+                                      &vvar_mapping);
+-      if (IS_ERR(vma)) {
++      if (IS_ERR(vma))
+               ret = PTR_ERR(vma);
+-              goto up_fail;
+-      }
+ up_fail:
+       if (ret)
+-              current->mm->context.vdso = NULL;
++              mm->context.vdso = 0;
+       up_write(&mm->mmap_sem);
+       return ret;
+@@ -262,9 +262,6 @@ static int load_vdso32(void)
+ #ifdef CONFIG_X86_64
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+-      if (!vdso64_enabled)
+-              return 0;
+-
+       return map_vdso(&vdso_image_64, true);
+ }
+@@ -273,12 +270,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+                                      int uses_interp)
+ {
+ #ifdef CONFIG_X86_X32_ABI
+-      if (test_thread_flag(TIF_X32)) {
+-              if (!vdso64_enabled)
+-                      return 0;
+-
++      if (test_thread_flag(TIF_X32))
+               return map_vdso(&vdso_image_x32, true);
+-      }
+ #endif
+ #ifdef CONFIG_IA32_EMULATION
+       return load_vdso32();
+@@ -295,15 +288,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ #endif
+ #ifdef CONFIG_X86_64
+-static __init int vdso_setup(char *s)
+-{
+-      vdso64_enabled = simple_strtoul(s, NULL, 0);
+-      return 0;
+-}
+-__setup("vdso=", vdso_setup);
+-#endif
+-
+-#ifdef CONFIG_X86_64
+ static void vgetcpu_cpu_init(void *arg)
+ {
+       int cpu = smp_processor_id();
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 636c4b3..666991b 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -38,10 +38,8 @@
+ #define CREATE_TRACE_POINTS
+ #include "vsyscall_trace.h"
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
+-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
+-      NATIVE;
+-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
++static enum { EMULATE, NONE } vsyscall_mode =
++#if defined(CONFIG_LEGACY_VSYSCALL_NONE)
+       NONE;
+ #else
+       EMULATE;
+@@ -52,8 +50,6 @@ static int __init vsyscall_setup(char *str)
+       if (str) {
+               if (!strcmp("emulate", str))
+                       vsyscall_mode = EMULATE;
+-              else if (!strcmp("native", str))
+-                      vsyscall_mode = NATIVE;
+               else if (!strcmp("none", str))
+                       vsyscall_mode = NONE;
+               else
+@@ -271,8 +267,7 @@ do_ret:
+       return true;
+ sigsegv:
+-      force_sig(SIGSEGV, current);
+-      return true;
++      do_group_exit(SIGKILL);
+ }
+ /*
+@@ -290,8 +285,8 @@ static const struct vm_operations_struct gate_vma_ops = {
+ static struct vm_area_struct gate_vma = {
+       .vm_start       = VSYSCALL_ADDR,
+       .vm_end         = VSYSCALL_ADDR + PAGE_SIZE,
+-      .vm_page_prot   = PAGE_READONLY_EXEC,
+-      .vm_flags       = VM_READ | VM_EXEC,
++      .vm_page_prot   = PAGE_READONLY,
++      .vm_flags       = VM_READ,
+       .vm_ops         = &gate_vma_ops,
+ };
+@@ -332,10 +327,7 @@ void __init map_vsyscall(void)
+       unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+       if (vsyscall_mode != NONE)
+-              __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
+-                           vsyscall_mode == NATIVE
+-                           ? PAGE_KERNEL_VSYSCALL
+-                           : PAGE_KERNEL_VVAR);
++              __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+                    (unsigned long)VSYSCALL_ADDR);
+diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
+index c9596a9..e1f6d5d 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S
++++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
+@@ -7,12 +7,13 @@
+  */
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/irq_vectors.h>
+ #include <asm/page_types.h>
+ #include <asm/unistd_64.h>
+-__PAGE_ALIGNED_DATA
++      __READ_ONLY
+       .globl __vsyscall_page
+       .balign PAGE_SIZE, 0xcc
+       .type __vsyscall_page, @object
+diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
+index b28200d..e93e14d 100644
+--- a/arch/x86/events/amd/iommu.c
++++ b/arch/x86/events/amd/iommu.c
+@@ -80,12 +80,12 @@ static struct attribute_group amd_iommu_format_group = {
+  * sysfs events attributes
+  *---------------------------------------------*/
+ struct amd_iommu_event_desc {
+-      struct kobj_attribute attr;
++      struct device_attribute attr;
+       const char *event;
+ };
+-static ssize_t _iommu_event_show(struct kobject *kobj,
+-                              struct kobj_attribute *attr, char *buf)
++static ssize_t _iommu_event_show(struct device *dev,
++                              struct device_attribute *attr, char *buf)
+ {
+       struct amd_iommu_event_desc *event =
+               container_of(attr, struct amd_iommu_event_desc, attr);
+@@ -407,7 +407,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
+ static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
+ {
+       struct attribute **attrs;
+-      struct attribute_group *attr_group;
++      attribute_group_no_const *attr_group;
+       int i = 0, j;
+       while (amd_iommu_v2_event_descs[i].attr.attr.name)
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index d0efb5c..10f0a95 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1545,7 +1545,7 @@ static void __init pmu_check_apic(void)
+ }
+-static struct attribute_group x86_pmu_format_group = {
++static attribute_group_no_const x86_pmu_format_group = {
+       .name = "format",
+       .attrs = NULL,
+ };
+@@ -1676,7 +1676,7 @@ static struct attribute *events_attr[] = {
+       NULL,
+ };
+-static struct attribute_group x86_pmu_events_group = {
++static attribute_group_no_const x86_pmu_events_group = {
+       .name = "events",
+       .attrs = events_attr,
+ };
+@@ -2313,7 +2313,7 @@ static unsigned long get_segment_base(unsigned int segment)
+               if (idx > GDT_ENTRIES)
+                       return 0;
+-              desc = raw_cpu_ptr(gdt_page.gdt) + idx;
++              desc = get_cpu_gdt_table(smp_processor_id()) + idx;
+       }
+       return get_desc_base(desc);
+@@ -2419,7 +2419,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+                       break;
+               perf_callchain_store(entry, frame.return_address);
+-              fp = (void __user *)frame.next_frame;
++              fp = (void __force_user *)frame.next_frame;
+       }
+       pagefault_enable();
+ }
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 4c9a79b..7c0d6ca 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2408,6 +2408,8 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ }
+ static void
++intel_start_scheduling(struct cpu_hw_events *cpuc) __acquires(&cpuc->excl_cntrs->lock);
++static void
+ intel_start_scheduling(struct cpu_hw_events *cpuc)
+ {
+       struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+@@ -2417,14 +2419,18 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
+       /*
+        * nothing needed if in group validation mode
+        */
+-      if (cpuc->is_fake || !is_ht_workaround_enabled())
++      if (cpuc->is_fake || !is_ht_workaround_enabled()) {
++              __acquire(&excl_cntrs->lock);
+               return;
++      }
+       /*
+        * no exclusion needed
+        */
+-      if (WARN_ON_ONCE(!excl_cntrs))
++      if (WARN_ON_ONCE(!excl_cntrs)) {
++              __acquire(&excl_cntrs->lock);
+               return;
++      }
+       xl = &excl_cntrs->states[tid];
+@@ -2464,6 +2470,8 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
+ }
+ static void
++intel_stop_scheduling(struct cpu_hw_events *cpuc) __releases(&cpuc->excl_cntrs->lock);
++static void
+ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+ {
+       struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+@@ -2473,13 +2481,18 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+       /*
+        * nothing needed if in group validation mode
+        */
+-      if (cpuc->is_fake || !is_ht_workaround_enabled())
++      if (cpuc->is_fake || !is_ht_workaround_enabled()) {
++              __release(&excl_cntrs->lock);
+               return;
++      }
++
+       /*
+        * no exclusion needed
+        */
+-      if (WARN_ON_ONCE(!excl_cntrs))
++      if (WARN_ON_ONCE(!excl_cntrs)) {
++              __release(&excl_cntrs->lock);
+               return;
++      }
+       xl = &excl_cntrs->states[tid];
+@@ -2662,19 +2675,22 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
+        * unused now.
+        */
+       if (hwc->idx >= 0) {
++              bool sched_started;
++
+               xl = &excl_cntrs->states[tid];
++              sched_started = xl->sched_started;
+               /*
+                * put_constraint may be called from x86_schedule_events()
+                * which already has the lock held so here make locking
+                * conditional.
+                */
+-              if (!xl->sched_started)
++              if (!sched_started)
+                       raw_spin_lock(&excl_cntrs->lock);
+               xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
+-              if (!xl->sched_started)
++              if (!sched_started)
+                       raw_spin_unlock(&excl_cntrs->lock);
+       }
+ }
+@@ -3608,10 +3624,10 @@ __init int intel_pmu_init(void)
+               x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+       if (boot_cpu_has(X86_FEATURE_PDCM)) {
+-              u64 capabilities;
++              u64 capabilities = x86_pmu.intel_cap.capabilities;
+-              rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+-              x86_pmu.intel_cap.capabilities = capabilities;
++              if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
++                      x86_pmu.intel_cap.capabilities = capabilities;
+       }
+       intel_ds_init();
+diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
+index 8f82b02..b10c4b0 100644
+--- a/arch/x86/events/intel/cqm.c
++++ b/arch/x86/events/intel/cqm.c
+@@ -1488,7 +1488,7 @@ static struct attribute *intel_cmt_mbm_events_attr[] = {
+       NULL,
+ };
+-static struct attribute_group intel_cqm_events_group = {
++static attribute_group_no_const intel_cqm_events_group __read_only = {
+       .name = "events",
+       .attrs = NULL,
+ };
+@@ -1732,7 +1732,9 @@ static int __init intel_cqm_init(void)
+               goto out;
+       }
+-      event_attr_intel_cqm_llc_scale.event_str = str;
++      pax_open_kernel();
++      const_cast(event_attr_intel_cqm_llc_scale.event_str) = str;
++      pax_close_kernel();
+       ret = intel_cqm_setup_rmid_cache();
+       if (ret)
+@@ -1743,12 +1745,14 @@ static int __init intel_cqm_init(void)
+       if (ret && !cqm_enabled)
+               goto out;
++      pax_open_kernel();
+       if (cqm_enabled && mbm_enabled)
+-              intel_cqm_events_group.attrs = intel_cmt_mbm_events_attr;
++              const_cast(intel_cqm_events_group.attrs) = intel_cmt_mbm_events_attr;
+       else if (!cqm_enabled && mbm_enabled)
+-              intel_cqm_events_group.attrs = intel_mbm_events_attr;
++              const_cast(intel_cqm_events_group.attrs) = intel_mbm_events_attr;
+       else if (cqm_enabled && !mbm_enabled)
+-              intel_cqm_events_group.attrs = intel_cqm_events_attr;
++              const_cast(intel_cqm_events_group.attrs) = intel_cqm_events_attr;
++      pax_close_kernel();
+       ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
+       if (ret) {
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 3ca87b5..207a386 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -95,14 +95,14 @@
+ MODULE_LICENSE("GPL");
+ #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)               \
+-static ssize_t __cstate_##_var##_show(struct kobject *kobj,   \
+-                              struct kobj_attribute *attr,    \
++static ssize_t __cstate_##_var##_show(struct device *dev,     \
++                              struct device_attribute *attr,  \
+                               char *page)                     \
+ {                                                             \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
+       return sprintf(page, _format "\n");                     \
+ }                                                             \
+-static struct kobj_attribute format_attr_##_var =             \
++static struct device_attribute format_attr_##_var =           \
+       __ATTR(_name, 0444, __cstate_##_var##_show, NULL)
+ static ssize_t cstate_get_attr_cpumask(struct device *dev,
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 9b983a4..b31c136 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -601,7 +601,7 @@ unlock:
+ static inline void intel_pmu_drain_pebs_buffer(void)
+ {
+-      struct pt_regs regs;
++      struct pt_regs regs = {};
+       x86_pmu.drain_pebs(&regs);
+ }
+@@ -909,7 +909,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       unsigned long from = cpuc->lbr_entries[0].from;
+       unsigned long old_to, to = cpuc->lbr_entries[0].to;
+-      unsigned long ip = regs->ip;
++      unsigned long ip = ktva_ktla(regs->ip);
+       int is_64bit = 0;
+       void *kaddr;
+       int size;
+@@ -961,6 +961,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+       } else {
+               kaddr = (void *)to;
+       }
++      kaddr = (void *)ktva_ktla((unsigned long)kaddr);
+       do {
+               struct insn insn;
+@@ -1109,7 +1110,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
+       }
+       if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
+-              regs->ip = pebs->real_ip;
++              set_linear_ip(regs, pebs->real_ip);
+               regs->flags |= PERF_EFLAGS_EXACT;
+       } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
+               regs->flags |= PERF_EFLAGS_EXACT;
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index 707d358..9eb1c4f 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -811,7 +811,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
+                * Ensure we don't blindy read any address by validating it is
+                * a known text address.
+                */
+-              if (kernel_text_address(from)) {
++              if (kernel_text_address(ktva_ktla(from))) {
+                       addr = (void *)from;
+                       /*
+                        * Assume we can get the maximum possible size
+@@ -833,7 +833,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
+ #ifdef CONFIG_X86_64
+       is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
+ #endif
+-      insn_init(&insn, addr, bytes_read, is64);
++      insn_init(&insn, (void *)ktva_ktla((unsigned long)addr), bytes_read, is64);
+       insn_get_opcode(&insn);
+       if (!insn.opcode.got)
+               return X86_BR_ABORT;
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index 861a7d9..2ff89b2 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -172,11 +172,9 @@ static const struct attribute_group *pt_attr_groups[] = {
+ static int __init pt_pmu_hw_init(void)
+ {
+-      struct dev_ext_attribute *de_attrs;
+-      struct attribute **attrs;
+-      size_t size;
++      static struct dev_ext_attribute de_attrs[ARRAY_SIZE(pt_caps)];
++      static struct attribute *attrs[ARRAY_SIZE(pt_caps)];
+       u64 reg;
+-      int ret;
+       long i;
+       rdmsrl(MSR_PLATFORM_INFO, reg);
+@@ -207,8 +205,6 @@ static int __init pt_pmu_hw_init(void)
+                       pt_pmu.vmx = true;
+       }
+-      attrs = NULL;
+-
+       for (i = 0; i < PT_CPUID_LEAVES; i++) {
+               cpuid_count(20, i,
+                           &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
+@@ -217,39 +213,25 @@ static int __init pt_pmu_hw_init(void)
+                           &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
+       }
+-      ret = -ENOMEM;
+-      size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
+-      attrs = kzalloc(size, GFP_KERNEL);
+-      if (!attrs)
+-              goto fail;
+-
+-      size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
+-      de_attrs = kzalloc(size, GFP_KERNEL);
+-      if (!de_attrs)
+-              goto fail;
+-
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
+-              struct dev_ext_attribute *de_attr = de_attrs + i;
++              struct dev_ext_attribute *de_attr = &de_attrs[i];
+-              de_attr->attr.attr.name = pt_caps[i].name;
++              const_cast(de_attr->attr.attr.name) = pt_caps[i].name;
+               sysfs_attr_init(&de_attr->attr.attr);
+-              de_attr->attr.attr.mode         = S_IRUGO;
+-              de_attr->attr.show              = pt_cap_show;
+-              de_attr->var                    = (void *)i;
++              const_cast(de_attr->attr.attr.mode)     = S_IRUGO;
++              const_cast(de_attr->attr.show)          = pt_cap_show;
++              const_cast(de_attr->var)                = (void *)i;
+               attrs[i] = &de_attr->attr.attr;
+       }
+-      pt_cap_group.attrs = attrs;
++      const_cast(pt_cap_group.attrs) = attrs;
++      pax_close_kernel();
+       return 0;
+-
+-fail:
+-      kfree(attrs);
+-
+-      return ret;
+ }
+ #define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC   | \
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 2886593..f191122 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -117,14 +117,14 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
+ #define RAPL_EVENT_MASK       0xFFULL
+ #define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)         \
+-static ssize_t __rapl_##_var##_show(struct kobject *kobj,     \
+-                              struct kobj_attribute *attr,    \
++static ssize_t __rapl_##_var##_show(struct device *dev,               \
++                              struct device_attribute *attr,  \
+                               char *page)                     \
+ {                                                             \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
+       return sprintf(page, _format "\n");                     \
+ }                                                             \
+-static struct kobj_attribute format_attr_##_var =             \
++static struct device_attribute format_attr_##_var =           \
+       __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
+ #define RAPL_CNTR_WIDTH 32
+@@ -533,7 +533,7 @@ static struct attribute *rapl_events_knl_attr[] = {
+       NULL,
+ };
+-static struct attribute_group rapl_pmu_events_group = {
++static attribute_group_no_const rapl_pmu_events_group __read_only = {
+       .name = "events",
+       .attrs = NULL, /* patched at runtime */
+ };
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 463dc7a..4c8d08b 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -90,8 +90,8 @@ end:
+       return map;
+ }
+-ssize_t uncore_event_show(struct kobject *kobj,
+-                        struct kobj_attribute *attr, char *buf)
++ssize_t uncore_event_show(struct device *dev,
++                        struct device_attribute *attr, char *buf)
+ {
+       struct uncore_event_desc *event =
+               container_of(attr, struct uncore_event_desc, attr);
+@@ -819,7 +819,7 @@ static void uncore_types_exit(struct intel_uncore_type **types)
+ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
+ {
+       struct intel_uncore_pmu *pmus;
+-      struct attribute_group *attr_group;
++      attribute_group_no_const *attr_group;
+       struct attribute **attrs;
+       size_t size;
+       int i, j;
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index 78b9c23..2f5c61e 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -122,9 +122,9 @@ struct intel_uncore_box {
+ #define UNCORE_BOX_FLAG_INITIATED     0
+ struct uncore_event_desc {
+-      struct kobj_attribute attr;
++      struct device_attribute attr;
+       const char *config;
+-};
++} __do_const;
+ struct pci2phy_map {
+       struct list_head list;
+@@ -134,8 +134,8 @@ struct pci2phy_map {
+ struct pci2phy_map *__find_pci2phy_map(int segment);
+-ssize_t uncore_event_show(struct kobject *kobj,
+-                        struct kobj_attribute *attr, char *buf);
++ssize_t uncore_event_show(struct device *dev,
++                        struct device_attribute *attr, char *buf);
+ #define INTEL_UNCORE_EVENT_DESC(_name, _config)                       \
+ {                                                             \
+@@ -144,14 +144,14 @@ ssize_t uncore_event_show(struct kobject *kobj,
+ }
+ #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                       \
+-static ssize_t __uncore_##_var##_show(struct kobject *kobj,           \
+-                              struct kobj_attribute *attr,            \
++static ssize_t __uncore_##_var##_show(struct device *dev,             \
++                              struct device_attribute *attr,          \
+                               char *page)                             \
+ {                                                                     \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
+       return sprintf(page, _format "\n");                             \
+ }                                                                     \
+-static struct kobj_attribute format_attr_##_var =                     \
++static struct device_attribute format_attr_##_var =                   \
+       __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+ static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 8c4a477..bd8370d 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -801,7 +801,7 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+       regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
+       if (regs->flags & X86_VM_MASK)
+               regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
+-      regs->ip = ip;
++      regs->ip = kernel_ip(ip) ? ktva_ktla(ip) : ip;
+ }
+ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index cb26f18..4f43f23 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
+       unsigned long dump_start, dump_size;
+       struct user32 dump;
++      memset(&dump, 0, sizeof(dump));
++
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       has_dumped = 1;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 2f29f4e..ac453b4 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -123,7 +123,7 @@ asmlinkage long sys32_sigreturn(void)
+       if (__get_user(set.sig[0], &frame->sc.oldmask)
+           || (_COMPAT_NSIG_WORDS > 1
+               && __copy_from_user((((char *) &set.sig) + 4),
+-                                  &frame->extramask,
++                                  frame->extramask,
+                                   sizeof(frame->extramask))))
+               goto badframe;
+@@ -243,7 +243,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+       sp -= frame_size;
+       /* Align the stack pointer according to the i386 ABI,
+        * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+-      sp = ((sp + 4) & -16ul) - 4;
++      sp = ((sp - 12) & -16ul) - 4;
+       return (void __user *) sp;
+ }
+@@ -288,10 +288,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+       } else {
+               /* Return stub is in 32bit vsyscall page */
+               if (current->mm->context.vdso)
+-                      restorer = current->mm->context.vdso +
+-                              vdso_image_32.sym___kernel_sigreturn;
++                      restorer = (void __force_user *)(current->mm->context.vdso +
++                              vdso_image_32.sym___kernel_sigreturn);
+               else
+-                      restorer = &frame->retcode;
++                      restorer = frame->retcode;
+       }
+       put_user_try {
+@@ -301,7 +301,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+                * These are actually not used anymore, but left because some
+                * gdb versions depend on them as a marker.
+                */
+-              put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++              put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+       } put_user_catch(err);
+       if (err)
+@@ -343,7 +343,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+               0xb8,
+               __NR_ia32_rt_sigreturn,
+               0x80cd,
+-              0,
++              0
+       };
+       frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
+@@ -366,16 +366,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+               if (ksig->ka.sa.sa_flags & SA_RESTORER)
+                       restorer = ksig->ka.sa.sa_restorer;
++              else if (current->mm->context.vdso)
++                      /* Return stub is in 32bit vsyscall page */
++                      restorer = (void __force_user *)(current->mm->context.vdso +
++                              vdso_image_32.sym___kernel_rt_sigreturn);
+               else
+-                      restorer = current->mm->context.vdso +
+-                              vdso_image_32.sym___kernel_rt_sigreturn;
++                      restorer = frame->retcode;
+               put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+               /*
+                * Not actually used anymore, but left because some gdb
+                * versions need it.
+                */
+-              put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++              put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+       } put_user_catch(err);
+       err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index 719cd70..72af944 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -49,18 +49,26 @@
+ #define AA(__x)               ((unsigned long)(__x))
++static inline loff_t compose_loff(unsigned int high, unsigned int low)
++{
++      loff_t retval = low;
++
++      BUILD_BUG_ON(sizeof retval != sizeof low + sizeof high);
++      __builtin_memcpy((unsigned char *)&retval + sizeof low, &high, sizeof high);
++      return retval;
++}
+ asmlinkage long sys32_truncate64(const char __user *filename,
+-                               unsigned long offset_low,
+-                               unsigned long offset_high)
++                               unsigned int offset_low,
++                               unsigned int offset_high)
+ {
+-       return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
++      return sys_truncate(filename, compose_loff(offset_high, offset_low));
+ }
+-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+-                                unsigned long offset_high)
++asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned int offset_low,
++                                unsigned int offset_high)
+ {
+-       return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
++      return sys_ftruncate(fd, ((unsigned long) offset_high << 32) | offset_low);
+ }
+ /*
+@@ -69,8 +77,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+  */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+-      typeof(ubuf->st_uid) uid = 0;
+-      typeof(ubuf->st_gid) gid = 0;
++      typeof(((struct stat64 *)0)->st_uid) uid = 0;
++      typeof(((struct stat64 *)0)->st_gid) gid = 0;
+       SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
+       SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
+       if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -196,29 +204,29 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
+                       __u32 len_low, __u32 len_high, int advice)
+ {
+       return sys_fadvise64_64(fd,
+-                             (((u64)offset_high)<<32) | offset_low,
+-                             (((u64)len_high)<<32) | len_low,
++                             compose_loff(offset_high, offset_low),
++                             compose_loff(len_high, len_low),
+                               advice);
+ }
+ asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
+                                  size_t count)
+ {
+-      return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
++      return sys_readahead(fd, compose_loff(off_hi, off_lo), count);
+ }
+ asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
+                                     unsigned n_low, unsigned n_hi,  int flags)
+ {
+       return sys_sync_file_range(fd,
+-                                 ((u64)off_hi << 32) | off_low,
+-                                 ((u64)n_hi << 32) | n_low, flags);
++                                 compose_loff(off_hi, off_low),
++                                 compose_loff(n_hi, n_low), flags);
+ }
+ asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
+-                              size_t len, int advice)
++                              int len, int advice)
+ {
+-      return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
++      return sys_fadvise64_64(fd, compose_loff(offset_hi, offset_lo),
+                               len, advice);
+ }
+@@ -226,6 +234,6 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
+                               unsigned offset_hi, unsigned len_lo,
+                               unsigned len_hi)
+ {
+-      return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
+-                           ((u64)len_hi << 32) | len_lo);
++      return sys_fallocate(fd, mode, compose_loff(offset_hi, offset_lo),
++                           compose_loff(len_hi, len_lo));
+ }
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index e7636ba..b9d3a6d 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -4,6 +4,7 @@
+ #ifdef __ASSEMBLY__
+ #include <asm/asm.h>
++#include <asm/irq_vectors.h>
+ #ifdef CONFIG_SMP
+       .macro LOCK_PREFIX
+@@ -18,6 +19,45 @@
+       .endm
+ #endif
++#ifdef KERNEXEC_PLUGIN
++      .macro pax_force_retaddr_bts rip=0
++      btsq $63,\rip(%rsp)
++      .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++      .macro pax_force_retaddr rip=0, reload=0
++      btsq $63,\rip(%rsp)
++      .endm
++      .macro pax_force_fptr ptr
++      btsq $63,\ptr
++      .endm
++      .macro pax_set_fptr_mask
++      .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++      .macro pax_force_retaddr rip=0, reload=0
++      .if \reload
++      pax_set_fptr_mask
++      .endif
++      orq %r12,\rip(%rsp)
++      .endm
++      .macro pax_force_fptr ptr
++      orq %r12,\ptr
++      .endm
++      .macro pax_set_fptr_mask
++      movabs $0x8000000000000000,%r12
++      .endm
++#endif
++#else
++      .macro pax_force_retaddr rip=0, reload=0
++      .endm
++      .macro pax_force_fptr ptr
++      .endm
++      .macro pax_force_retaddr_bts rip=0
++      .endm
++      .macro pax_set_fptr_mask
++      .endm
++#endif
++
+ /*
+  * Issue one struct alt_instr descriptor entry (need to put it into
+  * the section .altinstructions, see below). This entry contains
+@@ -50,7 +90,7 @@
+       altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+       .popsection
+-      .pushsection .altinstr_replacement,"ax"
++      .pushsection .altinstr_replacement,"a"
+ 143:
+       \newinstr
+ 144:
+@@ -86,7 +126,7 @@
+       altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+       .popsection
+-      .pushsection .altinstr_replacement,"ax"
++      .pushsection .altinstr_replacement,"a"
+ 143:
+       \newinstr1
+ 144:
+@@ -95,6 +135,26 @@
+       .popsection
+ .endm
++.macro __PAX_REFCOUNT section, counter
++#ifdef CONFIG_PAX_REFCOUNT
++      jo 111f
++      .pushsection .text.\section
++111:  lea \counter,%_ASM_CX
++      int $X86_REFCOUNT_VECTOR
++222:
++      .popsection
++333:
++      _ASM_EXTABLE(222b, 333b)
++#endif
++.endm
++
++.macro PAX_REFCOUNT64_OVERFLOW counter
++      __PAX_REFCOUNT refcount64_overflow, \counter
++.endm
++
++.macro PAX_REFCOUNT64_UNDERFLOW counter
++      __PAX_REFCOUNT refcount64_underflow, \counter
++.endm
+ #endif  /*  __ASSEMBLY__  */
+ #endif /* _ASM_X86_ALTERNATIVE_ASM_H */
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index e77a644..6bbec6f 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -7,6 +7,7 @@
+ #include <linux/stddef.h>
+ #include <linux/stringify.h>
+ #include <asm/asm.h>
++#include <asm/irq_vectors.h>
+ /*
+  * Alternative inline assembly for SMP.
+@@ -137,7 +138,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+       ".pushsection .altinstructions,\"a\"\n"                         \
+       ALTINSTR_ENTRY(feature, 1)                                      \
+       ".popsection\n"                                                 \
+-      ".pushsection .altinstr_replacement, \"ax\"\n"                  \
++      ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
+       ".popsection"
+@@ -147,7 +148,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+       ALTINSTR_ENTRY(feature1, 1)                                     \
+       ALTINSTR_ENTRY(feature2, 2)                                     \
+       ".popsection\n"                                                 \
+-      ".pushsection .altinstr_replacement, \"ax\"\n"                  \
++      ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
+       ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
+       ".popsection"
+@@ -234,6 +235,35 @@ static inline int alternatives_text_reserved(void *start, void *end)
+  */
+ #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
++#ifdef CONFIG_PAX_REFCOUNT
++#define __PAX_REFCOUNT(size)                          \
++      "jo 111f\n"                                     \
++      ".if "__stringify(size)" == 4\n\t"              \
++      ".pushsection .text.refcount_overflow\n"        \
++      ".elseif "__stringify(size)" == -4\n\t"         \
++      ".pushsection .text.refcount_underflow\n"       \
++      ".elseif "__stringify(size)" == 8\n\t"          \
++      ".pushsection .text.refcount64_overflow\n"      \
++      ".elseif "__stringify(size)" == -8\n\t"         \
++      ".pushsection .text.refcount64_underflow\n"     \
++      ".else\n"                                       \
++      ".error \"invalid size\"\n"                     \
++      ".endif\n"                                      \
++      "111:\tlea %[counter],%%"_ASM_CX"\n\t"          \
++      "int $"__stringify(X86_REFCOUNT_VECTOR)"\n"     \
++      "222:\n\t"                                      \
++      ".popsection\n"                                 \
++      "333:\n"                                        \
++      _ASM_EXTABLE(222b, 333b)
++
++#define PAX_REFCOUNT_OVERFLOW(size)   __PAX_REFCOUNT(size)
++#define PAX_REFCOUNT_UNDERFLOW(size)  __PAX_REFCOUNT(-(size))
++#else
++#define __PAX_REFCOUNT(size)
++#define PAX_REFCOUNT_OVERFLOW(size)
++#define PAX_REFCOUNT_UNDERFLOW(size)
++#endif
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* _ASM_X86_ALTERNATIVE_H */
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 1243577..302ac39 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -49,7 +49,7 @@ static inline void generic_apic_probe(void)
+ #ifdef CONFIG_X86_LOCAL_APIC
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 93eebc63..6a64395 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+-              "lcall *%%cs:apm_bios_entry\n\t"
++              "lcall *%%ss:apm_bios_entry\n\t"
+               "setc %%al\n\t"
+               "popl %%ebp\n\t"
+               "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+-              "lcall *%%cs:apm_bios_entry\n\t"
++              "lcall *%%ss:apm_bios_entry\n\t"
+               "setc %%bl\n\t"
+               "popl %%ebp\n\t"
+               "popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index 7acb51c..46ba0b3 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -79,30 +79,6 @@
+       _ASM_PTR (entry);                                       \
+       .popsection
+-.macro ALIGN_DESTINATION
+-      /* check for bad alignment of destination */
+-      movl %edi,%ecx
+-      andl $7,%ecx
+-      jz 102f                         /* already aligned */
+-      subl $8,%ecx
+-      negl %ecx
+-      subl %ecx,%edx
+-100:  movb (%rsi),%al
+-101:  movb %al,(%rdi)
+-      incq %rsi
+-      incq %rdi
+-      decl %ecx
+-      jnz 100b
+-102:
+-      .section .fixup,"ax"
+-103:  addl %ecx,%edx                  /* ecx is zerorest also */
+-      jmp copy_user_handle_tail
+-      .previous
+-
+-      _ASM_EXTABLE(100b,103b)
+-      _ASM_EXTABLE(101b,103b)
+-      .endm
+-
+ #else
+ # define _EXPAND_EXTABLE_HANDLE(x) #x
+ # define _ASM_EXTABLE_HANDLE(from, to, handler)                       \
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index 14635c5..199ea31 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -27,6 +27,17 @@ static __always_inline int atomic_read(const atomic_t *v)
+ }
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static __always_inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++      return ACCESS_ONCE((v)->counter);
++}
++
++/**
+  * atomic_set - set atomic variable
+  * @v: pointer of type atomic_t
+  * @i: required value
+@@ -39,6 +50,18 @@ static __always_inline void atomic_set(atomic_t *v, int i)
+ }
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static __always_inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++      v->counter = i;
++}
++
++/**
+  * atomic_add - add integer to atomic variable
+  * @i: integer value to add
+  * @v: pointer of type atomic_t
+@@ -47,8 +70,24 @@ static __always_inline void atomic_set(atomic_t *v, int i)
+  */
+ static __always_inline void atomic_add(int i, atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "addl %1,%0"
+-                   : "+m" (v->counter)
++      asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
++                   PAX_REFCOUNT_OVERFLOW(4)
++                   : [counter] "+m" (v->counter)
++                   : "ir" (i)
++                   : "cc", "cx");
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static __always_inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "addl %1,%0\n"
++                   : [counter] "+m" (v->counter)
+                    : "ir" (i));
+ }
+@@ -61,7 +100,23 @@ static __always_inline void atomic_add(int i, atomic_t *v)
+  */
+ static __always_inline void atomic_sub(int i, atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "subl %1,%0"
++      asm volatile(LOCK_PREFIX "subl %1,%0\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(4)
++                   : [counter] "+m" (v->counter)
++                   : "ir" (i)
++                   : "cc", "cx");
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static __always_inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "subl %1,%0\n"
+                    : "+m" (v->counter)
+                    : "ir" (i));
+ }
+@@ -77,7 +132,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
+  */
+ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
++      GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, -4, "er", i, "%0", e);
+ }
+ /**
+@@ -88,7 +143,21 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+  */
+ static __always_inline void atomic_inc(atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "incl %0"
++      asm volatile(LOCK_PREFIX "incl %0\n\t"
++                   PAX_REFCOUNT_OVERFLOW(4)
++                   : [counter] "+m" (v->counter)
++                   : : "cc", "cx");
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static __always_inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "incl %0\n"
+                    : "+m" (v->counter));
+ }
+@@ -100,7 +169,21 @@ static __always_inline void atomic_inc(atomic_t *v)
+  */
+ static __always_inline void atomic_dec(atomic_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "decl %0"
++      asm volatile(LOCK_PREFIX "decl %0\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(4)
++                   : [counter] "+m" (v->counter)
++                   : : "cc", "cx");
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static __always_inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "decl %0\n"
+                    : "+m" (v->counter));
+ }
+@@ -114,7 +197,7 @@ static __always_inline void atomic_dec(atomic_t *v)
+  */
+ static __always_inline bool atomic_dec_and_test(atomic_t *v)
+ {
+-      GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
++      GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, -4, "%0", e);
+ }
+ /**
+@@ -127,7 +210,20 @@ static __always_inline bool atomic_dec_and_test(atomic_t *v)
+  */
+ static __always_inline bool atomic_inc_and_test(atomic_t *v)
+ {
+-      GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
++      GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, 4, "%0", e);
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static __always_inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++      GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", e);
+ }
+ /**
+@@ -141,7 +237,7 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
+  */
+ static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
++      GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, 4, "er", i, "%0", s);
+ }
+ /**
+@@ -151,7 +247,19 @@ static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+  *
+  * Atomically adds @i to @v and returns @i + @v
+  */
+-static __always_inline int atomic_add_return(int i, atomic_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
++{
++      return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic_add_return_unchecked - add integer and return
++ * @i: integer value to add
++ * @v: pointer of type atomi_uncheckedc_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static __always_inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+ {
+       return i + xadd(&v->counter, i);
+ }
+@@ -163,25 +271,34 @@ static __always_inline int atomic_add_return(int i, atomic_t *v)
+  *
+  * Atomically subtracts @i from @v and returns @v - @i
+  */
+-static __always_inline int atomic_sub_return(int i, atomic_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
+ {
+       return atomic_add_return(-i, v);
+ }
+ #define atomic_inc_return(v)  (atomic_add_return(1, v))
++static __always_inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++      return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v)  (atomic_sub_return(1, v))
+ static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+ {
+-      return xadd(&v->counter, i);
++      return xadd_check_overflow(&v->counter, i);
+ }
+ static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+ {
+-      return xadd(&v->counter, -i);
++      return xadd_check_overflow(&v->counter, -i);
+ }
+-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++static __always_inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
++
++static __always_inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+ {
+       return cmpxchg(&v->counter, old, new);
+ }
+@@ -191,6 +308,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
+       return xchg(&v->counter, new);
+ }
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++      return xchg(&v->counter, new);
++}
++
+ #define ATOMIC_OP(op)                                                 \
+ static inline void atomic_##op(int i, atomic_t *v)                    \
+ {                                                                     \
+@@ -236,12 +358,20 @@ ATOMIC_OPS(xor, ^)
+  */
+ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+-      int c, old;
++      int c, old, new;
+       c = atomic_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic_cmpxchg((v), c, c + (a));
++
++              asm volatile("addl %2,%0\n\t"
++                           PAX_REFCOUNT_OVERFLOW(4)
++                           : "=r" (new)
++                           : "0" (c), "ir" (a),
++                             [counter] "m" (v->counter)
++                           : "cc", "cx");
++
++              old = atomic_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+@@ -250,6 +380,114 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+ /**
++ * atomic_inc_not_zero_hint - increment if not null
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() gives a hint of probable
++ * value of the atomic. This helps processor to not read the memory
++ * before doing the atomic read/modify/write cycle, lowering
++ * number of bus transactions on some arches.
++ *
++ * Returns: 0 if increment was not done, 1 otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++      int val, c = hint, new;
++
++      /* sanity test, should be removed by compiler if hint is a constant */
++      if (!hint)
++              return __atomic_add_unless(v, 1, 0);
++
++      do {
++              asm volatile("incl %0\n\t"
++                           PAX_REFCOUNT_OVERFLOW(4)
++                           : "=r" (new)
++                           : "0" (c),
++                             [counter] "m" (v->counter)
++                           : "cc", "cx");
++
++              val = atomic_cmpxchg(v, c, new);
++              if (val == c)
++                      return 1;
++              c = val;
++      } while (c);
++
++      return 0;
++}
++
++#define atomic_inc_unless_negative atomic_inc_unless_negative
++static inline int atomic_inc_unless_negative(atomic_t *p)
++{
++      int v, v1, new;
++
++      for (v = 0; v >= 0; v = v1) {
++              asm volatile("incl %0\n\t"
++                           PAX_REFCOUNT_OVERFLOW(4)
++                           : "=r" (new)
++                           : "0" (v),
++                             [counter] "m" (p->counter)
++                           : "cc", "cx");
++
++              v1 = atomic_cmpxchg(p, v, new);
++              if (likely(v1 == v))
++                      return 1;
++      }
++      return 0;
++}
++
++#define atomic_dec_unless_positive atomic_dec_unless_positive
++static inline int atomic_dec_unless_positive(atomic_t *p)
++{
++      int v, v1, new;
++
++      for (v = 0; v <= 0; v = v1) {
++              asm volatile("decl %0\n\t"
++                           PAX_REFCOUNT_UNDERFLOW(4)
++                           : "=r" (new)
++                           : "0" (v),
++                             [counter] "m" (p->counter)
++                           : "cc", "cx");
++
++              v1 = atomic_cmpxchg(p, v, new);
++              if (likely(v1 == v))
++                      return 1;
++      }
++      return 0;
++}
++
++/*
++ * atomic_dec_if_positive - decrement by 1 if old value positive
++ * @v: pointer of type atomic_t
++ *
++ * The function returns the old value of *v minus 1, even if
++ * the atomic variable, v, was not decremented.
++ */
++#define atomic_dec_if_positive atomic_dec_if_positive
++static inline int atomic_dec_if_positive(atomic_t *v)
++{
++      int c, old, dec;
++      c = atomic_read(v);
++      for (;;) {
++              asm volatile("decl %0\n\t"
++                           PAX_REFCOUNT_UNDERFLOW(4)
++                           : "=r" (dec)
++                           : "0" (c),
++                             [counter] "m" (v->counter)
++                           : "cc", "cx");
++
++              if (unlikely(dec < 0))
++                      break;
++              old = atomic_cmpxchg(v, c, dec);
++              if (likely(old == c))
++                      break;
++              c = old;
++      }
++      return dec;
++}
++
++/**
+  * atomic_inc_short - increment of a short integer
+  * @v: pointer to type int
+  *
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index 71d7705..99a1fe8 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -8,9 +8,17 @@
+ /* An 64bit atomic type */
+ typedef struct {
+-      u64 __aligned(8) counter;
++      s64 __aligned(8) counter;
+ } atomic64_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      s64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val)    { (val) }
+ #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+@@ -36,21 +44,31 @@ typedef struct {
+       ATOMIC64_DECL_ONE(sym##_386)
+ ATOMIC64_DECL_ONE(add_386);
++ATOMIC64_DECL_ONE(add_unchecked_386);
+ ATOMIC64_DECL_ONE(sub_386);
++ATOMIC64_DECL_ONE(sub_unchecked_386);
+ ATOMIC64_DECL_ONE(inc_386);
++ATOMIC64_DECL_ONE(inc_unchecked_386);
+ ATOMIC64_DECL_ONE(dec_386);
++ATOMIC64_DECL_ONE(dec_unchecked_386);
+ #endif
+ #define alternative_atomic64(f, out, in...) \
+       __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+ ATOMIC64_DECL(read);
++ATOMIC64_DECL(read_unchecked);
+ ATOMIC64_DECL(set);
++ATOMIC64_DECL(set_unchecked);
+ ATOMIC64_DECL(xchg);
+ ATOMIC64_DECL(add_return);
++ATOMIC64_DECL(add_return_unchecked);
+ ATOMIC64_DECL(sub_return);
++ATOMIC64_DECL(sub_return_unchecked);
+ ATOMIC64_DECL(inc_return);
++ATOMIC64_DECL(inc_return_unchecked);
+ ATOMIC64_DECL(dec_return);
++ATOMIC64_DECL(dec_return_unchecked);
+ ATOMIC64_DECL(dec_if_positive);
+ ATOMIC64_DECL(inc_not_zero);
+ ATOMIC64_DECL(add_unless);
+@@ -76,6 +94,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
+ }
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @p: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++      return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+  * atomic64_xchg - xchg atomic64 variable
+  * @v: pointer to type atomic64_t
+  * @n: value to assign
+@@ -95,6 +128,25 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
+ }
+ /**
++ * atomic64_xchg_unchecked - xchg atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically xchgs the value of @v to @n and returns
++ * the old value.
++ */
++static inline long long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long long n)
++{
++      long long o;
++      unsigned high = (unsigned)(n >> 32);
++      unsigned low = (unsigned)n;
++      alternative_atomic64(xchg, "=&A" (o),
++                           "S" (v), "b" (low), "c" (high)
++                           : "memory");
++      return o;
++}
++
++/**
+  * atomic64_set - set atomic64 variable
+  * @v: pointer to type atomic64_t
+  * @i: value to assign
+@@ -111,6 +163,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ }
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically sets the value of @v to @n.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++      unsigned high = (unsigned)(i >> 32);
++      unsigned low = (unsigned)i;
++      alternative_atomic64(set, /* no output */,
++                           "S" (v), "b" (low), "c" (high)
++                           : "eax", "edx", "memory");
++}
++
++/**
+  * atomic64_read - read atomic64 variable
+  * @v: pointer to type atomic64_t
+  *
+@@ -124,6 +192,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+  }
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      long long r;
++      alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
++      return r;
++ }
++
++/**
+  * atomic64_add_return - add and return
+  * @i: integer value to add
+  * @v: pointer to type atomic64_t
+@@ -138,6 +219,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+       return i;
+ }
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++      alternative_atomic64(add_return_unchecked,
++                           ASM_OUTPUT2("+A" (i), "+c" (v)),
++                           ASM_NO_INPUT_CLOBBER("memory"));
++      return i;
++}
++
+ /*
+  * Other variants with different arithmetic operators:
+  */
+@@ -157,6 +253,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
+       return a;
+ }
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++      long long a;
++      alternative_atomic64(inc_return_unchecked, "=&A" (a),
++                           "S" (v) : "memory", "ecx");
++      return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+       long long a;
+@@ -181,6 +285,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
+ }
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++      __alternative_atomic64(add_unchecked, add_return_unchecked,
++                             ASM_OUTPUT2("+A" (i), "+c" (v)),
++                             ASM_NO_INPUT_CLOBBER("memory"));
++      return i;
++}
++
++/**
+  * atomic64_sub - subtract the atomic64 variable
+  * @i: integer value to subtract
+  * @v: pointer to type atomic64_t
+@@ -222,6 +341,18 @@ static inline void atomic64_inc(atomic64_t *v)
+ }
+ /**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++      __alternative_atomic64(inc_unchecked, inc_return_unchecked, /* no output */,
++                             "S" (v) : "memory", "eax", "ecx", "edx");
++}
++
++/**
+  * atomic64_dec - decrement atomic64 variable
+  * @v: pointer to type atomic64_t
+  *
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index 89ed2f6..25490ad 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
+ }
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++      return ACCESS_ONCE((v)->counter);
++}
++
++/**
+  * atomic64_set - set atomic64 variable
+  * @v: pointer to type atomic64_t
+  * @i: required value
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ }
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++      v->counter = i;
++}
++
++/**
+  * atomic64_add - add integer to atomic64 variable
+  * @i: integer value to add
+  * @v: pointer to type atomic64_t
+@@ -42,6 +66,22 @@ static inline void atomic64_set(atomic64_t *v, long i)
+  */
+ static __always_inline void atomic64_add(long i, atomic64_t *v)
+ {
++      asm volatile(LOCK_PREFIX "addq %1,%0\n\t"
++                   PAX_REFCOUNT_OVERFLOW(8)
++                   : [counter] "=m" (v->counter)
++                   : "er" (i), "m" (v->counter)
++                   : "cc", "cx");
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static __always_inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+       asm volatile(LOCK_PREFIX "addq %1,%0"
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+@@ -56,7 +96,23 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
+  */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "subq %1,%0"
++      asm volatile(LOCK_PREFIX "subq %1,%0\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(8)
++                   : [counter] "=m" (v->counter)
++                   : "er" (i), "m" (v->counter)
++                   : "cc", "cx");
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "subq %1,%0\n"
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+ }
+@@ -72,7 +128,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+  */
+ static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
++      GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, -8, "er", i, "%0", e);
+ }
+ /**
+@@ -83,6 +139,21 @@ static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
+  */
+ static __always_inline void atomic64_inc(atomic64_t *v)
+ {
++      asm volatile(LOCK_PREFIX "incq %0\n\t"
++                   PAX_REFCOUNT_OVERFLOW(8)
++                   : [counter] "=m" (v->counter)
++                   : "m" (v->counter)
++                   : "cc", "cx");
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static __always_inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+       asm volatile(LOCK_PREFIX "incq %0"
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+@@ -96,7 +167,22 @@ static __always_inline void atomic64_inc(atomic64_t *v)
+  */
+ static __always_inline void atomic64_dec(atomic64_t *v)
+ {
+-      asm volatile(LOCK_PREFIX "decq %0"
++      asm volatile(LOCK_PREFIX "decq %0\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(8)
++                   : [counter] "=m" (v->counter)
++                   : "m" (v->counter)
++                   : "cc", "cx");
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static __always_inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++      asm volatile(LOCK_PREFIX "decq %0\n"
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+ }
+@@ -111,7 +197,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
+  */
+ static inline bool atomic64_dec_and_test(atomic64_t *v)
+ {
+-      GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
++      GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, -8, "%0", e);
+ }
+ /**
+@@ -124,7 +210,7 @@ static inline bool atomic64_dec_and_test(atomic64_t *v)
+  */
+ static inline bool atomic64_inc_and_test(atomic64_t *v)
+ {
+-      GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
++      GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, 8, "%0", e);
+ }
+ /**
+@@ -138,7 +224,7 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
+  */
+ static inline bool atomic64_add_negative(long i, atomic64_t *v)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
++      GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, 8, "er", i, "%0", s);
+ }
+ /**
+@@ -150,6 +236,18 @@ static inline bool atomic64_add_negative(long i, atomic64_t *v)
+  */
+ static __always_inline long atomic64_add_return(long i, atomic64_t *v)
+ {
++      return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static __always_inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
+       return i + xadd(&v->counter, i);
+ }
+@@ -160,15 +258,19 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+ static inline long atomic64_fetch_add(long i, atomic64_t *v)
+ {
+-      return xadd(&v->counter, i);
++      return xadd_check_overflow(&v->counter, i);
+ }
+ static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+ {
+-      return xadd(&v->counter, -i);
++      return xadd_check_overflow(&v->counter, -i);
+ }
+ #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++      return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -176,11 +278,21 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+       return cmpxchg(&v->counter, old, new);
+ }
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++      return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+       return xchg(&v->counter, new);
+ }
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++      return xchg(&v->counter, new);
++}
++
+ /**
+  * atomic64_add_unless - add unless the number is a given value
+  * @v: pointer of type atomic64_t
+@@ -192,17 +304,25 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
+  */
+ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+-      long c, old;
++      long c, old, new;
+       c = atomic64_read(v);
+       for (;;) {
+-              if (unlikely(c == (u)))
++              if (unlikely(c == u))
+                       break;
+-              old = atomic64_cmpxchg((v), c, c + (a));
++
++              asm volatile("addq %2,%0\n\t"
++                           PAX_REFCOUNT_OVERFLOW(8)
++                           : "=r" (new)
++                           : "0" (c), "ir" (a),
++                             [counter] "m" (v->counter)
++                           : "cc", "cx");
++
++              old = atomic64_cmpxchg(v, c, new);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+-      return c != (u);
++      return c != u;
+ }
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 68557f52..d9828ec 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -50,7 +50,7 @@
+  * a mask operation on a byte.
+  */
+ #define IS_IMMEDIATE(nr)              (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr)     BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr)     BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr)                        (1 << ((nr) & 7))
+ /**
+@@ -203,7 +203,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+  */
+ static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
++      GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
+ }
+ /**
+@@ -249,7 +249,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
+  */
+ static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
++      GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
+ }
+ /**
+@@ -302,7 +302,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
+  */
+ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+ {
+-      GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
++      GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
+ }
+ static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+@@ -343,7 +343,7 @@ static bool test_bit(int nr, const volatile unsigned long *addr);
+  *
+  * Undefined if no bit exists, so code should check against 0 first.
+  */
+-static __always_inline unsigned long __ffs(unsigned long word)
++static __always_inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
+ {
+       asm("rep; bsf %1,%0"
+               : "=r" (word)
+@@ -357,7 +357,7 @@ static __always_inline unsigned long __ffs(unsigned long word)
+  *
+  * Undefined if no zero exists, so code should check against ~0UL first.
+  */
+-static __always_inline unsigned long ffz(unsigned long word)
++static __always_inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
+ {
+       asm("rep; bsf %1,%0"
+               : "=r" (word)
+@@ -371,7 +371,7 @@ static __always_inline unsigned long ffz(unsigned long word)
+  *
+  * Undefined if no set bit exists, so code should check against 0 first.
+  */
+-static __always_inline unsigned long __fls(unsigned long word)
++static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+       asm("bsr %1,%0"
+           : "=r" (word)
+@@ -434,7 +434,7 @@ static __always_inline int ffs(int x)
+  * set bit if value is nonzero. The last (most significant) bit is
+  * at position 32.
+  */
+-static __always_inline int fls(int x)
++static __always_inline int __intentional_overflow(-1) fls(int x)
+ {
+       int r;
+@@ -476,7 +476,7 @@ static __always_inline int fls(int x)
+  * at position 64.
+  */
+ #ifdef CONFIG_X86_64
+-static __always_inline int fls64(__u64 x)
++static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
+ {
+       int bitpos = -1;
+       /*
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index abd06b1..17fc65f 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -6,7 +6,7 @@
+ #include <uapi/asm/boot.h>
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+                               + (CONFIG_PHYSICAL_ALIGN - 1)) \
+                               & ~(CONFIG_PHYSICAL_ALIGN - 1))
+diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
+index 48f99f1..26ab08a 100644
+--- a/arch/x86/include/asm/cache.h
++++ b/arch/x86/include/asm/cache.h
+@@ -5,12 +5,12 @@
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT        (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES        (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES        (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+ #ifdef CONFIG_X86_VSMP
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
+index 7b53743..5f207d2 100644
+--- a/arch/x86/include/asm/checksum_32.h
++++ b/arch/x86/include/asm/checksum_32.h
+@@ -30,6 +30,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+                                           int len, __wsum sum,
+                                           int *src_err_ptr, int *dst_err_ptr);
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++                                                int len, __wsum sum,
++                                                int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++                                                int len, __wsum sum,
++                                                int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+  *    Note: when you get a NULL pointer exception here this means someone
+  *    passed in an incorrect kernel address to one of these functions.
+@@ -52,7 +60,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+       might_sleep();
+       stac();
+-      ret = csum_partial_copy_generic((__force void *)src, dst,
++      ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
+                                       len, sum, err_ptr, NULL);
+       clac();
+@@ -183,7 +191,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+       might_sleep();
+       if (access_ok(VERIFY_WRITE, dst, len)) {
+               stac();
+-              ret = csum_partial_copy_generic(src, (__force void *)dst,
++              ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
+                                               len, sum, NULL, err_ptr);
+               clac();
+               return ret;
+diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
+index 9733361..49bda42 100644
+--- a/arch/x86/include/asm/cmpxchg.h
++++ b/arch/x86/include/asm/cmpxchg.h
+@@ -15,8 +15,12 @@ extern void __cmpxchg_wrong_size(void)
+       __compiletime_error("Bad argument size for cmpxchg");
+ extern void __xadd_wrong_size(void)
+       __compiletime_error("Bad argument size for xadd");
++extern void __xadd_check_overflow_wrong_size(void)
++      __compiletime_error("Bad argument size for xadd_check_overflow");
+ extern void __add_wrong_size(void)
+       __compiletime_error("Bad argument size for add");
++extern void __add_check_overflow_wrong_size(void)
++      __compiletime_error("Bad argument size for add_check_overflow");
+ /*
+  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
+@@ -68,6 +72,32 @@ extern void __add_wrong_size(void)
+               __ret;                                                  \
+       })
++#ifdef CONFIG_PAX_REFCOUNT
++#define __xchg_op_check_overflow(ptr, arg, op, lock)                  \
++      ({                                                              \
++              __typeof__ (*(ptr)) __ret = (arg);                      \
++              switch (sizeof(*(ptr))) {                               \
++              case __X86_CASE_L:                                      \
++                      asm volatile (lock #op "l %0, %1\n"             \
++                                    PAX_REFCOUNT_OVERFLOW(4)          \
++                                    : "+r" (__ret), [counter] "+m" (*(ptr))\
++                                    : : "memory", "cc", "cx");        \
++                      break;                                          \
++              case __X86_CASE_Q:                                      \
++                      asm volatile (lock #op "q %q0, %1\n"            \
++                                    PAX_REFCOUNT_OVERFLOW(8)          \
++                                    : "+r" (__ret), [counter] "+m" (*(ptr))\
++                                    : : "memory", "cc", "cx");        \
++                      break;                                          \
++              default:                                                \
++                      __ ## op ## _check_overflow_wrong_size();       \
++              }                                                       \
++              __ret;                                                  \
++      })
++#else
++#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
++#endif
++
+ /*
+  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+  * Since this is generally used to protect other memory information, we
+@@ -166,6 +196,9 @@ extern void __add_wrong_size(void)
+ #define xadd_sync(ptr, inc)   __xadd((ptr), (inc), "lock; ")
+ #define xadd_local(ptr, inc)  __xadd((ptr), (inc), "")
++#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
++#define xadd_check_overflow(ptr, inc)         __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
++
+ #define __add(ptr, inc, lock)                                         \
+       ({                                                              \
+               __typeof__ (*(ptr)) __ret = (inc);                      \
+diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
+index a188061..280d840 100644
+--- a/arch/x86/include/asm/compat.h
++++ b/arch/x86/include/asm/compat.h
+@@ -42,7 +42,11 @@ typedef u32         compat_uint_t;
+ typedef u32           compat_ulong_t;
+ typedef u32           compat_u32;
+ typedef u64 __attribute__((aligned(4))) compat_u64;
++#ifdef CHECKER_PLUGIN_USER
+ typedef u32           compat_uptr_t;
++#else
++typedef u32           __user compat_uptr_t;
++#endif
+ struct compat_timespec {
+       compat_time_t   tv_sec;
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 1d2b69f..8ca35d6 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -156,7 +156,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit)
+                        " .byte 5f - 4f\n"             /* repl len */
+                        " .byte 3b - 2b\n"             /* pad len */
+                        ".previous\n"
+-                       ".section .altinstr_replacement,\"ax\"\n"
++                       ".section .altinstr_replacement,\"a\"\n"
+                        "4: jmp %l[t_no]\n"
+                        "5:\n"
+                        ".previous\n"
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 92a8308..4e44144 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,7 +205,8 @@
+ #define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
+ #define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
+-
++#define X86_FEATURE_PCIDUDEREF        ( 8*32+30) /* PaX PCID based UDEREF */
++#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+ #define X86_FEATURE_FSGSBASE  ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+@@ -213,7 +214,7 @@
+ #define X86_FEATURE_BMI1      ( 9*32+ 3) /* 1st group bit manipulation extensions */
+ #define X86_FEATURE_HLE               ( 9*32+ 4) /* Hardware Lock Elision */
+ #define X86_FEATURE_AVX2      ( 9*32+ 5) /* AVX2 instructions */
+-#define X86_FEATURE_SMEP      ( 9*32+ 7) /* Supervisor Mode Execution Protection */
++#define X86_FEATURE_SMEP      ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
+ #define X86_FEATURE_BMI2      ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+ #define X86_FEATURE_ERMS      ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+ #define X86_FEATURE_INVPCID   ( 9*32+10) /* Invalidate Processor Context ID */
+diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
+index bb93333..e3d3d57 100644
+--- a/arch/x86/include/asm/crypto/camellia.h
++++ b/arch/x86/include/asm/crypto/camellia.h
+@@ -39,34 +39,35 @@ extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ /* regular block cipher functions */
+ asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+                                  const u8 *src, bool xor);
+-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_dec_blk(void *ctx, u8 *dst,
+                                const u8 *src);
+ /* 2-way parallel cipher functions */
+ asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+                                       const u8 *src, bool xor);
+-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_dec_blk_2way(void *ctx, u8 *dst,
+                                     const u8 *src);
+ /* 16-way parallel cipher functions (avx/aes-ni) */
+-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_ecb_enc_16way(void *ctx, u8 *dst,
+                                      const u8 *src);
+-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_ecb_dec_16way(void *ctx, u8 *dst,
+                                      const u8 *src);
+-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
++asmlinkage void camellia_cbc_dec_16way(void *ctx, u8 *dst,
+                                      const u8 *src);
+-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
+-                                 const u8 *src, le128 *iv);
++asmlinkage void camellia_ctr_16way(void *ctx, u128 *dst,
++                                 const u128 *src, le128 *iv);
+-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+-                                     const u8 *src, le128 *iv);
+-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+-                                     const u8 *src, le128 *iv);
++asmlinkage void camellia_xts_enc_16way(void *ctx, u128 *dst,
++                                     const u128 *src, le128 *iv);
++asmlinkage void camellia_xts_dec_16way(void *ctx, u128 *dst,
++                                     const u128 *src, le128 *iv);
+-static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
++static inline void camellia_enc_blk(void *_ctx, u8 *dst,
+                                   const u8 *src)
+ {
++      struct camellia_ctx *ctx = _ctx;
+       __camellia_enc_blk(ctx, dst, src, false);
+ }
+@@ -76,9 +77,10 @@ static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
+       __camellia_enc_blk(ctx, dst, src, true);
+ }
+-static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
++static inline void camellia_enc_blk_2way(void *_ctx, u8 *dst,
+                                        const u8 *src)
+ {
++      struct camellia_ctx *ctx = _ctx;
+       __camellia_enc_blk_2way(ctx, dst, src, false);
+ }
+@@ -89,7 +91,7 @@ static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+ }
+ /* glue helpers */
+-extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
++extern void camellia_decrypt_cbc_2way(void *ctx, u8 *dst, const u8 *src);
+ extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+                              le128 *iv);
+ extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
+diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
+index 03bb106..9e7a45c 100644
+--- a/arch/x86/include/asm/crypto/glue_helper.h
++++ b/arch/x86/include/asm/crypto/glue_helper.h
+@@ -11,16 +11,16 @@
+ #include <crypto/b128ops.h>
+ typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
+-typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
++typedef void (*common_glue_cbc_func_t)(void *ctx, u8 *dst, const u8 *src);
+ typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+                                      le128 *iv);
+ typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+                                      le128 *iv);
+-#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
+-#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
+-#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
+-#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
++#define GLUE_FUNC_CAST(fn) (fn)
++#define GLUE_CBC_FUNC_CAST(fn) (fn)
++#define GLUE_CTR_FUNC_CAST(fn) (fn)
++#define GLUE_XTS_FUNC_CAST(fn) (fn)
+ struct common_glue_func_entry {
+       unsigned int num_blocks; /* number of blocks that @fn will process */
+diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
+index 33c2b8a..586871f 100644
+--- a/arch/x86/include/asm/crypto/serpent-avx.h
++++ b/arch/x86/include/asm/crypto/serpent-avx.h
+@@ -16,20 +16,20 @@ struct serpent_xts_ctx {
+       struct serpent_ctx crypt_ctx;
+ };
+-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_ecb_enc_8way_avx(void *ctx, u8 *dst,
+                                        const u8 *src);
+-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_ecb_dec_8way_avx(void *ctx, u8 *dst,
+                                        const u8 *src);
+-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
++asmlinkage void serpent_cbc_dec_8way_avx(void *ctx, u8 *dst,
+                                        const u8 *src);
+-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+-                                   const u8 *src, le128 *iv);
++asmlinkage void serpent_ctr_8way_avx(void *ctx, u128 *dst,
++                                   const u128 *src, le128 *iv);
+-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+-                                       const u8 *src, le128 *iv);
+-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+-                                       const u8 *src, le128 *iv);
++asmlinkage void serpent_xts_enc_8way_avx(void *ctx, u128 *dst,
++                                       const u128 *src, le128 *iv);
++asmlinkage void serpent_xts_dec_8way_avx(void *ctx, u128 *dst,
++                                       const u128 *src, le128 *iv);
+ extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+                               le128 *iv);
+diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h
+index e6e77df..fe42081 100644
+--- a/arch/x86/include/asm/crypto/serpent-sse2.h
++++ b/arch/x86/include/asm/crypto/serpent-sse2.h
+@@ -13,7 +13,7 @@ asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+ asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+                                    const u8 *src);
+-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
++static inline void serpent_enc_blk_xway(void *ctx, u8 *dst,
+                                       const u8 *src)
+ {
+       __serpent_enc_blk_4way(ctx, dst, src, false);
+@@ -25,7 +25,7 @@ static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+       __serpent_enc_blk_4way(ctx, dst, src, true);
+ }
+-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
++static inline void serpent_dec_blk_xway(void *ctx, u8 *dst,
+                                       const u8 *src)
+ {
+       serpent_dec_blk_4way(ctx, dst, src);
+@@ -40,7 +40,7 @@ asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+ asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+                                    const u8 *src);
+-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
++static inline void serpent_enc_blk_xway(void *ctx, u8 *dst,
+                                  const u8 *src)
+ {
+       __serpent_enc_blk_8way(ctx, dst, src, false);
+@@ -52,7 +52,7 @@ static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+       __serpent_enc_blk_8way(ctx, dst, src, true);
+ }
+-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
++static inline void serpent_dec_blk_xway(void *ctx, u8 *dst,
+                                  const u8 *src)
+ {
+       serpent_dec_blk_8way(ctx, dst, src);
+diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
+index 878c51c..86fc65f 100644
+--- a/arch/x86/include/asm/crypto/twofish.h
++++ b/arch/x86/include/asm/crypto/twofish.h
+@@ -17,19 +17,19 @@ struct twofish_xts_ctx {
+ };
+ /* regular block cipher functions from twofish_x86_64 module */
+-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_enc_blk(void *ctx, u8 *dst,
+                               const u8 *src);
+-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_dec_blk(void *ctx, u8 *dst,
+                               const u8 *src);
+ /* 3-way parallel cipher functions */
+-asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void __twofish_enc_blk_3way(void *ctx, u8 *dst,
+                                      const u8 *src, bool xor);
+-asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
++asmlinkage void twofish_dec_blk_3way(void *ctx, u8 *dst,
+                                    const u8 *src);
+ /* helpers from twofish_x86_64-3way module */
+-extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
++extern void twofish_dec_blk_cbc_3way(void *ctx, u8 *dst, const u8 *src);
+ extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+                               le128 *iv);
+ extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index 4e10d73..7319a47 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+ #include <linux/smp.h>
+ #include <linux/percpu.h>
+@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+       desc->type              = (info->read_exec_only ^ 1) << 1;
+       desc->type             |= info->contents << 2;
++      desc->type             |= info->seg_not_present ^ 1;
+       desc->s                 = 1;
+       desc->dpl               = 0x3;
+@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+ }
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-extern struct desc_ptr debug_idt_descr;
+-extern gate_desc debug_idt_table[];
+-
+-struct gdt_page {
+-      struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[IDT_ENTRIES];
++extern const struct desc_ptr debug_idt_descr;
++extern gate_desc debug_idt_table[IDT_ENTRIES];
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+-      return per_cpu(gdt_page, cpu).gdt;
++      return cpu_gdt_table[cpu];
+ }
+ #ifdef CONFIG_X86_64
+@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
+                            unsigned long base, unsigned dpl, unsigned flags,
+                            unsigned short seg)
+ {
+-      gate->a = (seg << 16) | (base & 0xffff);
+-      gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++      gate->gate.offset_low   = base;
++      gate->gate.seg          = seg;
++      gate->gate.reserved     = 0;
++      gate->gate.type         = type;
++      gate->gate.s            = 0;
++      gate->gate.dpl          = dpl;
++      gate->gate.p            = 1;
++      gate->gate.offset_high  = base >> 16;
+ }
+ #endif
+@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+ static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+ {
++      pax_open_kernel();
+       memcpy(&idt[entry], gate, sizeof(*gate));
++      pax_close_kernel();
+ }
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+ {
++      pax_open_kernel();
+       memcpy(&ldt[entry], desc, 8);
++      pax_close_kernel();
+ }
+ static inline void
+@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
+       default:        size = sizeof(*gdt);            break;
+       }
++      pax_open_kernel();
+       memcpy(&gdt[entry], desc, size);
++      pax_close_kernel();
+ }
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+ static inline void native_load_tr_desc(void)
+ {
++      pax_open_kernel();
+       asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++      pax_close_kernel();
+ }
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+       struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+               gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++      pax_close_kernel();
+ }
+ /* This intentionally ignores lm, since 32-bit apps don't have that field. */
+@@ -280,7 +293,7 @@ static inline void clear_LDT(void)
+       set_ldt(NULL, 0);
+ }
+-static inline unsigned long get_desc_base(const struct desc_struct *desc)
++static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
+ {
+       return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+ }
+@@ -304,7 +317,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+ }
+ #ifdef CONFIG_X86_64
+-static inline void set_nmi_gate(int gate, void *addr)
++static inline void set_nmi_gate(int gate, const void *addr)
+ {
+       gate_desc s;
+@@ -314,14 +327,14 @@ static inline void set_nmi_gate(int gate, void *addr)
+ #endif
+ #ifdef CONFIG_TRACING
+-extern struct desc_ptr trace_idt_descr;
+-extern gate_desc trace_idt_table[];
++extern const struct desc_ptr trace_idt_descr;
++extern gate_desc trace_idt_table[IDT_ENTRIES];
+ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+ {
+       write_idt_entry(trace_idt_table, entry, gate);
+ }
+-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
++static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
+                                  unsigned dpl, unsigned ist, unsigned seg)
+ {
+       gate_desc s;
+@@ -341,7 +354,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+ #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
+ #endif
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+                            unsigned dpl, unsigned ist, unsigned seg)
+ {
+       gate_desc s;
+@@ -364,14 +377,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+ #define set_intr_gate_notrace(n, addr)                                        \
+       do {                                                            \
+               BUG_ON((unsigned)n > 0xFF);                             \
+-              _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,        \
++              _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0,  \
+                         __KERNEL_CS);                                 \
+       } while (0)
+ #define set_intr_gate(n, addr)                                                \
+       do {                                                            \
+               set_intr_gate_notrace(n, addr);                         \
+-              _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
++              _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
+                               0, 0, __KERNEL_CS);                     \
+       } while (0)
+@@ -399,19 +412,19 @@ static inline void alloc_system_vector(int vector)
+ /*
+  * This routine sets up an interrupt gate at directory privilege level 3.
+  */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -420,16 +433,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+-      _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++      _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+       BUG_ON((unsigned)n > 0xFF);
+       _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+@@ -501,4 +514,17 @@ static inline void load_current_idt(void)
+       else
+               load_idt((const struct desc_ptr *)&idt_descr);
+ }
++
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++      struct desc_struct d;
++
++      if (likely(limit))
++              limit = (limit - 1UL) >> PAGE_SHIFT;
++      pack_descriptor(&d, base, limit, 0xFB, 0xC);
++      write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
+diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
+index eb5deb4..ec19436 100644
+--- a/arch/x86/include/asm/desc_defs.h
++++ b/arch/x86/include/asm/desc_defs.h
+@@ -31,6 +31,12 @@ struct desc_struct {
+                       unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+                       unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+               };
++              struct {
++                      u16 offset_low;
++                      u16 seg;
++                      unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++                      unsigned offset_high: 16;
++              } gate;
+       };
+ } __attribute__((packed));
+diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
+index ced283a..ffe04cc 100644
+--- a/arch/x86/include/asm/div64.h
++++ b/arch/x86/include/asm/div64.h
+@@ -39,7 +39,7 @@
+       __mod;                                                  \
+ })
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+       union {
+               u64 v64;
+diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
+index fe884e1..46149ae 100644
+--- a/arch/x86/include/asm/dma.h
++++ b/arch/x86/include/asm/dma.h
+@@ -149,6 +149,7 @@
+ #ifdef CONFIG_ISA_DMA_API
+ extern spinlock_t  dma_spin_lock;
++static inline unsigned long claim_dma_lock(void) __acquires(&dma_spin_lock);
+ static inline unsigned long claim_dma_lock(void)
+ {
+       unsigned long flags;
+@@ -156,6 +157,7 @@ static inline unsigned long claim_dma_lock(void)
+       return flags;
+ }
++static inline void release_dma_lock(unsigned long flags) __releases(&dma_spin_lock);
+ static inline void release_dma_lock(unsigned long flags)
+ {
+       spin_unlock_irqrestore(&dma_spin_lock, flags);
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index d0bb76d..bb192fc 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -151,6 +151,11 @@ static inline bool efi_is_native(void)
+ static inline bool efi_runtime_supported(void)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      return false;
++#endif
++
+       if (efi_is_native())
+               return true;
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index e7f155c..8611814 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
+ #include <asm/vdso.h>
+-#ifdef CONFIG_X86_64
+-extern unsigned int vdso64_enabled;
+-#endif
+ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+ extern unsigned int vdso32_enabled;
+ #endif
+@@ -250,7 +247,25 @@ extern int force_personality32;
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE               ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE               (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE   0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN    (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN   (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE   0x400000UL
++
++#define PAX_DELTA_MMAP_LEN    ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN   ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+@@ -299,17 +314,13 @@ do {                                                                     \
+ #define ARCH_DLINFO                                                   \
+ do {                                                                  \
+-      if (vdso64_enabled)                                             \
+-              NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
+-                          (unsigned long __force)current->mm->context.vdso); \
++      NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
+ } while (0)
+ /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
+ #define ARCH_DLINFO_X32                                                       \
+ do {                                                                  \
+-      if (vdso64_enabled)                                             \
+-              NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
+-                          (unsigned long __force)current->mm->context.vdso); \
++      NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
+ } while (0)
+ #define AT_SYSINFO            32
+@@ -324,10 +335,10 @@ else                                                                     \
+ #endif /* !CONFIG_X86_32 */
+-#define VDSO_CURRENT_BASE     ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE     (current->mm->context.vdso)
+ #define VDSO_ENTRY                                                    \
+-      ((unsigned long)current->mm->context.vdso +                     \
++      (current->mm->context.vdso +                                    \
+        vdso_image_32.sym___kernel_vsyscall)
+ struct linux_binprm;
+diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
+index 77a99ac..39ff7f5 100644
+--- a/arch/x86/include/asm/emergency-restart.h
++++ b/arch/x86/include/asm/emergency-restart.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_X86_EMERGENCY_RESTART_H
+ #define _ASM_X86_EMERGENCY_RESTART_H
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
+index 8554f96..6c58add9 100644
+--- a/arch/x86/include/asm/fixmap.h
++++ b/arch/x86/include/asm/fixmap.h
+@@ -142,7 +142,7 @@ extern pte_t *kmap_pte;
+ extern pte_t *pkmap_page_table;
+ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
+-void native_set_fixmap(enum fixed_addresses idx,
++void native_set_fixmap(unsigned int idx,
+                      phys_addr_t phys, pgprot_t flags);
+ #ifndef CONFIG_PARAVIRT
+diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
+index 1c7eefe..d0e4702 100644
+--- a/arch/x86/include/asm/floppy.h
++++ b/arch/x86/include/asm/floppy.h
+@@ -229,18 +229,18 @@ static struct fd_routine_l {
+       int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+ } fd_routine[] = {
+       {
+-              request_dma,
+-              free_dma,
+-              get_dma_residue,
+-              dma_mem_alloc,
+-              hard_dma_setup
++              ._request_dma = request_dma,
++              ._free_dma = free_dma,
++              ._get_dma_residue = get_dma_residue,
++              ._dma_mem_alloc = dma_mem_alloc,
++              ._dma_setup = hard_dma_setup
+       },
+       {
+-              vdma_request_dma,
+-              vdma_nop,
+-              vdma_get_dma_residue,
+-              vdma_mem_alloc,
+-              vdma_dma_setup
++              ._request_dma = vdma_request_dma,
++              ._free_dma = vdma_nop,
++              ._get_dma_residue = vdma_get_dma_residue,
++              ._dma_mem_alloc = vdma_mem_alloc,
++              ._dma_setup = vdma_dma_setup
+       }
+ };
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 2737366..e152d4b 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -102,9 +102,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ #define user_insn(insn, output, input...)                             \
+ ({                                                                    \
+       int err;                                                        \
+-      asm volatile(ASM_STAC "\n"                                      \
+-                   "1:" #insn "\n\t"                                  \
+-                   "2: " ASM_CLAC "\n"                                \
++      user_access_begin();                                            \
++      asm volatile("1:"                                               \
++                   __copyuser_seg                                     \
++                   #insn "\n\t"                                       \
++                   "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+                    "    jmp  2b\n"                                    \
+@@ -112,6 +114,7 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
++      user_access_end();                                              \
+       err;                                                            \
+ })
+@@ -191,9 +194,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
+ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ {
+       if (IS_ENABLED(CONFIG_X86_32))
+-              asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
++              asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+       else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
+-              asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
++              asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+       else {
+               /* Using "rex64; fxsave %0" is broken because, if the memory
+                * operand uses any extended registers for addressing, a second
+@@ -210,15 +213,15 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+                * an extended register is needed for addressing (fix submitted
+                * to mainline 2005-11-21).
+                *
+-               *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
++               *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+                *
+                * This, however, we can work around by forcing the compiler to
+                * select an addressing mode that doesn't require extended
+                * registers.
+                */
+               asm volatile( "rex64/fxsave (%[fx])"
+-                           : "=m" (fpu->state.fxsave)
+-                           : [fx] "R" (&fpu->state.fxsave));
++                           : "=m" (fpu->state->fxsave)
++                           : [fx] "R" (&fpu->state->fxsave));
+       }
+ }
+@@ -390,9 +393,9 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+       if (unlikely(err))
+               return -EFAULT;
+-      stac();
+-      XSTATE_OP(XSAVE, buf, -1, -1, err);
+-      clac();
++      user_access_begin();
++      XSTATE_OP(__copyuser_seg XSAVE, buf, -1, -1, err);
++      user_access_end();
+       return err;
+ }
+@@ -402,14 +405,14 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+  */
+ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+ {
+-      struct xregs_state *xstate = ((__force struct xregs_state *)buf);
++      struct xregs_state *xstate = ((__force_kernel struct xregs_state *)buf);
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err;
+-      stac();
+-      XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+-      clac();
++      user_access_begin();
++      XSTATE_OP(__copyuser_seg XRSTOR, xstate, lmask, hmask, err);
++      user_access_end();
+       return err;
+ }
+@@ -427,7 +430,7 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+ {
+       if (likely(use_xsave())) {
+-              copy_xregs_to_kernel(&fpu->state.xsave);
++              copy_xregs_to_kernel(&fpu->state->xsave);
+               return 1;
+       }
+@@ -440,7 +443,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+        * Legacy FPU register saving, FNSAVE always clears FPU registers,
+        * so we have to mark them inactive:
+        */
+-      asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
++      asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state->fsave));
+       return 0;
+ }
+@@ -469,7 +472,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
+                       "fnclex\n\t"
+                       "emms\n\t"
+                       "fildl %P[addr]"        /* set F?P to defined value */
+-                      : : [addr] "m" (fpstate));
++                      : : [addr] "m" (cpu_tss[raw_smp_processor_id()].x86_tss.sp0));
+       }
+       __copy_kernel_to_fpregs(fpstate);
+@@ -614,7 +617,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+                       new_fpu->counter++;
+                       __fpregs_activate(new_fpu);
+                       trace_x86_fpu_regs_activated(new_fpu);
+-                      prefetch(&new_fpu->state);
++                      prefetch(new_fpu->state);
+               } else {
+                       __fpregs_deactivate_hw();
+               }
+@@ -626,7 +629,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+                       if (fpu_want_lazy_restore(new_fpu, cpu))
+                               fpu.preload = 0;
+                       else
+-                              prefetch(&new_fpu->state);
++                              prefetch(new_fpu->state);
+                       fpregs_activate(new_fpu);
+               }
+       }
+@@ -646,7 +649,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+ static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+ {
+       if (fpu_switch.preload)
+-              copy_kernel_to_fpregs(&new_fpu->state);
++              copy_kernel_to_fpregs(new_fpu->state);
+ }
+ /*
+diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
+index 48df486..e32babd 100644
+--- a/arch/x86/include/asm/fpu/types.h
++++ b/arch/x86/include/asm/fpu/types.h
+@@ -276,6 +276,39 @@ union fpregs_state {
+  */
+ struct fpu {
+       /*
++       * @state:
++       *
++       * In-memory copy of all FPU registers that we save/restore
++       * over context switches. If the task is using the FPU then
++       * the registers in the FPU are more recent than this state
++       * copy. If the task context-switches away then they get
++       * saved here and represent the FPU state.
++       *
++       * After context switches there may be a (short) time period
++       * during which the in-FPU hardware registers are unchanged
++       * and still perfectly match this state, if the tasks
++       * scheduled afterwards are not using the FPU.
++       *
++       * This is the 'lazy restore' window of optimization, which
++       * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
++       *
++       * We detect whether a subsequent task uses the FPU via setting
++       * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
++       *
++       * During this window, if the task gets scheduled again, we
++       * might be able to skip having to do a restore from this
++       * memory buffer to the hardware registers - at the cost of
++       * incurring the overhead of #NM fault traps.
++       *
++       * Note that on modern CPUs that support the XSAVEOPT (or other
++       * optimized XSAVE instructions), we don't use #NM traps anymore,
++       * as the hardware can track whether FPU registers need saving
++       * or not. On such CPUs we activate the non-lazy ('eagerfpu')
++       * logic, which unconditionally saves/restores all FPU state
++       * across context switches. (if FPU state exists.)
++       */
++      union fpregs_state              *state;
++      /*
+        * @last_cpu:
+        *
+        * Records the last CPU on which this context was loaded into
+@@ -332,43 +365,6 @@ struct fpu {
+        * deal with bursty apps that only use the FPU for a short time:
+        */
+       unsigned char                   counter;
+-      /*
+-       * @state:
+-       *
+-       * In-memory copy of all FPU registers that we save/restore
+-       * over context switches. If the task is using the FPU then
+-       * the registers in the FPU are more recent than this state
+-       * copy. If the task context-switches away then they get
+-       * saved here and represent the FPU state.
+-       *
+-       * After context switches there may be a (short) time period
+-       * during which the in-FPU hardware registers are unchanged
+-       * and still perfectly match this state, if the tasks
+-       * scheduled afterwards are not using the FPU.
+-       *
+-       * This is the 'lazy restore' window of optimization, which
+-       * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
+-       *
+-       * We detect whether a subsequent task uses the FPU via setting
+-       * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
+-       *
+-       * During this window, if the task gets scheduled again, we
+-       * might be able to skip having to do a restore from this
+-       * memory buffer to the hardware registers - at the cost of
+-       * incurring the overhead of #NM fault traps.
+-       *
+-       * Note that on modern CPUs that support the XSAVEOPT (or other
+-       * optimized XSAVE instructions), we don't use #NM traps anymore,
+-       * as the hardware can track whether FPU registers need saving
+-       * or not. On such CPUs we activate the non-lazy ('eagerfpu')
+-       * logic, which unconditionally saves/restores all FPU state
+-       * across context switches. (if FPU state exists.)
+-       */
+-      union fpregs_state              state;
+-      /*
+-       * WARNING: 'state' is dynamically-sized.  Do not put
+-       * anything after it here.
+-       */
+ };
+ #endif /* _ASM_X86_FPU_H */
+diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
+index 19f30a8..d0561c13 100644
+--- a/arch/x86/include/asm/fpu/xstate.h
++++ b/arch/x86/include/asm/fpu/xstate.h
+@@ -43,6 +43,7 @@
+ #define REX_PREFIX
+ #endif
++extern unsigned int xstate_size;
+ extern u64 xfeatures_mask;
+ extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
+index b4c1f54..726053d 100644
+--- a/arch/x86/include/asm/futex.h
++++ b/arch/x86/include/asm/futex.h
+@@ -12,25 +12,25 @@
+ #include <asm/smap.h>
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)   \
+-      asm volatile("\t" ASM_STAC "\n"                         \
+-                   "1:\t" insn "\n"                           \
+-                   "2:\t" ASM_CLAC "\n"                       \
++      typecheck(u32 __user *, uaddr);                         \
++      asm volatile("1:\t" insn "\n"                           \
++                   "2:\t\n"                                   \
+                    "\t.section .fixup,\"ax\"\n"               \
+                    "3:\tmov\t%3, %1\n"                        \
+                    "\tjmp\t2b\n"                              \
+                    "\t.previous\n"                            \
+                    _ASM_EXTABLE(1b, 3b)                       \
+-                   : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++                   : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))    \
+                    : "i" (-EFAULT), "0" (oparg), "1" (0))
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)   \
+-      asm volatile("\t" ASM_STAC "\n"                         \
+-                   "1:\tmovl  %2, %0\n"                       \
++      typecheck(u32 __user *, uaddr);                         \
++      asm volatile("1:\tmovl  %2, %0\n"                       \
+                    "\tmovl\t%0, %3\n"                         \
+                    "\t" insn "\n"                             \
+-                   "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"     \
++                   "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n"       \
+                    "\tjnz\t1b\n"                              \
+-                   "3:\t" ASM_CLAC "\n"                       \
++                   "3:\t\n"                                   \
+                    "\t.section .fixup,\"ax\"\n"               \
+                    "4:\tmov\t%5, %1\n"                        \
+                    "\tjmp\t3b\n"                              \
+@@ -38,7 +38,7 @@
+                    _ASM_EXTABLE(1b, 4b)                       \
+                    _ASM_EXTABLE(2b, 4b)                       \
+                    : "=&a" (oldval), "=&r" (ret),             \
+-                     "+m" (*uaddr), "=&r" (tem)               \
++                     "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem)  \
+                    : "r" (oparg), "i" (-EFAULT), "1" (0))
+ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+@@ -57,12 +57,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+       pagefault_disable();
++      user_access_begin();
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++              __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++              __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+                                  uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+@@ -77,6 +78,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+       default:
+               ret = -ENOSYS;
+       }
++      user_access_end();
+       pagefault_enable();
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index b90e105..30a5950 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -164,8 +164,8 @@ static inline void unlock_vector_lock(void) {}
+ #endif        /* CONFIG_X86_LOCAL_APIC */
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+ extern void elcr_set_level_irq(unsigned int irq);
+diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
+index 055ea99..7dabb68 100644
+--- a/arch/x86/include/asm/hypervisor.h
++++ b/arch/x86/include/asm/hypervisor.h
+@@ -43,7 +43,7 @@ struct hypervisor_x86 {
+       /* X2APIC detection (run once per boot) */
+       bool            (*x2apic_available)(void);
+-};
++} __do_const;
+ extern const struct hypervisor_x86 *x86_hyper;
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index 39bcefc..272d904 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -63,7 +63,7 @@ struct legacy_pic {
+       int (*probe)(void);
+       int (*irq_pending)(unsigned int irq);
+       void (*make_irq)(unsigned int irq);
+-};
++} __do_const;
+ extern struct legacy_pic *legacy_pic;
+ extern struct legacy_pic null_legacy_pic;
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index de25aad..dc04476 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -42,6 +42,7 @@
+ #include <asm/page.h>
+ #include <asm/early_ioremap.h>
+ #include <asm/pgtable_types.h>
++#include <asm/processor.h>
+ #define build_mmio_read(name, size, type, reg, barrier) \
+ static inline type name(const volatile void __iomem *addr) \
+@@ -54,12 +55,12 @@ static inline void name(type val, volatile void __iomem *addr) \
+ "m" (*(volatile type __force *)addr) barrier); }
+ build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
++build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
++build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
+ build_mmio_read(__readb, "b", unsigned char, "=q", )
+-build_mmio_read(__readw, "w", unsigned short, "=r", )
+-build_mmio_read(__readl, "l", unsigned int, "=r", )
++build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
++build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
+ build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
+ build_mmio_write(writew, "w", unsigned short, "r", :"memory")
+@@ -115,7 +116,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
+  *    this function
+  */
+-static inline phys_addr_t virt_to_phys(volatile void *address)
++static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
+ {
+       return __pa(address);
+ }
+@@ -194,7 +195,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+       return ioremap_nocache(offset, size);
+ }
+-extern void iounmap(volatile void __iomem *addr);
++extern void iounmap(const volatile void __iomem *addr);
+ extern void set_iounmap_nonlazy(void);
+@@ -202,6 +203,17 @@ extern void set_iounmap_nonlazy(void);
+ #include <asm-generic/iomap.h>
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++      return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++      return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+  * Convert a virtual cached pointer to an uncached pointer
+  */
+diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
+index 6ca9fd6..4c0aa55 100644
+--- a/arch/x86/include/asm/irq_vectors.h
++++ b/arch/x86/include/asm/irq_vectors.h
+@@ -48,6 +48,8 @@
+ #define IA32_SYSCALL_VECTOR           0x80
++#define X86_REFCOUNT_VECTOR           0x81 /* Refcount Overflow or Underflow Exception */
++
+ /*
+  * Vectors 0x30-0x3f are used for ISA interrupts.
+  *   round up to the next 16-vector boundary
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index b77f5ed..3862b91 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -23,11 +23,13 @@ static inline unsigned long native_save_fl(void)
+                    : /* no input */
+                    : "memory");
++      BUG_ON(flags & X86_EFLAGS_AC);
+       return flags;
+ }
+ static inline void native_restore_fl(unsigned long flags)
+ {
++      BUG_ON(flags & X86_EFLAGS_AC);
+       asm volatile("push %0 ; popf"
+                    : /* no output */
+                    :"g" (flags)
+@@ -137,6 +139,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
+       swapgs;                                 \
+       sysretl
++#define GET_CR0_INTO_RDI              mov %cr0, %rdi
++#define SET_RDI_INTO_CR0              mov %rdi, %cr0
++#define GET_CR3_INTO_RDI              mov %cr3, %rdi
++#define SET_RDI_INTO_CR3              mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN              iret
+ #define ENABLE_INTERRUPTS_SYSEXIT     sti; sysexit
+diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
+index d1d1e50..5bacb6d 100644
+--- a/arch/x86/include/asm/kprobes.h
++++ b/arch/x86/include/asm/kprobes.h
+@@ -37,7 +37,7 @@ typedef u8 kprobe_opcode_t;
+ #define RELATIVEJUMP_SIZE 5
+ #define RELATIVECALL_OPCODE 0xe8
+ #define RELATIVE_ADDR_SIZE 4
+-#define MAX_STACK_SIZE 64
++#define MAX_STACK_SIZE 64UL
+ #define CUR_STACK_SIZE(ADDR) \
+       (current_top_of_stack() - (unsigned long)(ADDR))
+ #define MIN_STACK_SIZE(ADDR)                          \
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index e9cd7be..0f3574f 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -279,6 +279,8 @@ enum x86emul_mode {
+ #define X86EMUL_SMM_MASK             (1 << 6)
+ #define X86EMUL_SMM_INSIDE_NMI_MASK  (1 << 7)
++struct fastop;
++
+ struct x86_emulate_ctxt {
+       const struct x86_emulate_ops *ops;
+@@ -311,7 +313,10 @@ struct x86_emulate_ctxt {
+       struct operand src;
+       struct operand src2;
+       struct operand dst;
+-      int (*execute)(struct x86_emulate_ctxt *ctxt);
++      union {
++              int (*execute)(struct x86_emulate_ctxt *ctxt);
++              void (*fastop)(struct fastop *fake);
++      } u;
+       int (*check_perm)(struct x86_emulate_ctxt *ctxt);
+       /*
+        * The following six fields are cleared together,
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 7511978..cf52573 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -10,33 +10,73 @@ typedef struct {
+       atomic_long_t a;
+ } local_t;
++typedef struct {
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i)       atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i)     atomic_long_set_unchecked(&(l)->a, (i))
+ static inline void local_inc(local_t *l)
+ {
+-      asm volatile(_ASM_INC "%0"
++      asm volatile(_ASM_INC "%0\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
++                   : [counter] "+m" (l->a.counter)
++                   : : "cc", "cx");
++}
++
++static inline void local_inc_unchecked(local_unchecked_t *l)
++{
++      asm volatile(_ASM_INC "%0\n"
+                    : "+m" (l->a.counter));
+ }
+ static inline void local_dec(local_t *l)
+ {
+-      asm volatile(_ASM_DEC "%0"
++      asm volatile(_ASM_DEC "%0\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8)
++                   : [counter] "+m" (l->a.counter)
++                   : : "cc", "cx");
++}
++
++static inline void local_dec_unchecked(local_unchecked_t *l)
++{
++      asm volatile(_ASM_DEC "%0\n"
+                    : "+m" (l->a.counter));
+ }
+ static inline void local_add(long i, local_t *l)
+ {
+-      asm volatile(_ASM_ADD "%1,%0"
++      asm volatile(_ASM_ADD "%1,%0\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
++                   : [counter] "+m" (l->a.counter)
++                   : "ir" (i)
++                   : "cc", "cx");
++}
++
++static inline void local_add_unchecked(long i, local_unchecked_t *l)
++{
++      asm volatile(_ASM_ADD "%1,%0\n"
+                    : "+m" (l->a.counter)
+                    : "ir" (i));
+ }
+ static inline void local_sub(long i, local_t *l)
+ {
+-      asm volatile(_ASM_SUB "%1,%0"
++      asm volatile(_ASM_SUB "%1,%0\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8)
++                   : [counter] "+m" (l->a.counter)
++                   : "ir" (i)
++                   : "cc", "cx");
++}
++
++static inline void local_sub_unchecked(long i, local_unchecked_t *l)
++{
++      asm volatile(_ASM_SUB "%1,%0\n"
+                    : "+m" (l->a.counter)
+                    : "ir" (i));
+ }
+@@ -52,7 +92,7 @@ static inline void local_sub(long i, local_t *l)
+  */
+ static inline bool local_sub_and_test(long i, local_t *l)
+ {
+-      GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
++      GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, -BITS_PER_LONG/8, "er", i, "%0", e);
+ }
+ /**
+@@ -65,7 +105,7 @@ static inline bool local_sub_and_test(long i, local_t *l)
+  */
+ static inline bool local_dec_and_test(local_t *l)
+ {
+-      GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
++      GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, -BITS_PER_LONG/8, "%0", e);
+ }
+ /**
+@@ -78,7 +118,7 @@ static inline bool local_dec_and_test(local_t *l)
+  */
+ static inline bool local_inc_and_test(local_t *l)
+ {
+-      GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
++      GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, BITS_PER_LONG/8, "%0", e);
+ }
+ /**
+@@ -92,7 +132,7 @@ static inline bool local_inc_and_test(local_t *l)
+  */
+ static inline bool local_add_negative(long i, local_t *l)
+ {
+-      GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
++      GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, BITS_PER_LONG/8, "er", i, "%0", s);
+ }
+ /**
+@@ -105,6 +145,23 @@ static inline bool local_add_negative(long i, local_t *l)
+ static inline long local_add_return(long i, local_t *l)
+ {
+       long __i = i;
++      asm volatile(_ASM_XADD "%0, %1\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
++                   : "+r" (i), [counter] "+m" (l->a.counter)
++                   : : "memory", "cc", "cx");
++      return i + __i;
++}
++
++/**
++ * local_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @l: pointer to type local_unchecked_t
++ *
++ * Atomically adds @i to @l and returns @i + @l
++ */
++static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
++{
++      long __i = i;
+       asm volatile(_ASM_XADD "%0, %1;"
+                    : "+r" (i), "+m" (l->a.counter)
+                    : : "memory");
+@@ -121,6 +178,8 @@ static inline long local_sub_return(long i, local_t *l)
+ #define local_cmpxchg(l, o, n) \
+       (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++      (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ /* Always has a lock prefix */
+ #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
+index 8bf766e..d800b61 100644
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -184,7 +184,7 @@ struct mca_msr_regs {
+       u32 (*status)   (int bank);
+       u32 (*addr)     (int bank);
+       u32 (*misc)     (int bank);
+-};
++} __no_const;
+ extern struct mce_vendor_flags mce_flags;
+diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
+new file mode 100644
+index 0000000..2bfd3ba
+--- /dev/null
++++ b/arch/x86/include/asm/mman.h
+@@ -0,0 +1,15 @@
++#ifndef _X86_MMAN_H
++#define _X86_MMAN_H
++
++#include <uapi/asm/mman.h>
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check       i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
++#endif
++#endif
++#endif
++
++#endif /* X86_MMAN_H */
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 1ea0bae..25de747 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -19,7 +19,19 @@ typedef struct {
+ #endif
+       struct mutex lock;
+-      void __user *vdso;                      /* vdso base address */
++      unsigned long vdso;                     /* vdso base address */
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      unsigned long user_cs_base;
++      unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++      cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
+       const struct vdso_image *vdso_image;    /* vdso image in use */
+       atomic_t perf_rdpmc_allowed;    /* nonzero if rdpmc is allowed */
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index d8abfcf..721da30 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -46,7 +46,7 @@ struct ldt_struct {
+        * allocations, but it's not worth trying to optimize.
+        */
+       struct desc_struct *entries;
+-      int size;
++      unsigned int size;
+ };
+ /*
+@@ -58,6 +58,23 @@ void destroy_context_ldt(struct mm_struct *mm);
+ static inline int init_new_context_ldt(struct task_struct *tsk,
+                                      struct mm_struct *mm)
+ {
++      if (tsk == current) {
++              mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              mm->context.user_cs_base = 0UL;
++              mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++              cpumask_clear(&mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++      }
++
+       return 0;
+ }
+ static inline void destroy_context_ldt(struct mm_struct *mm) {}
+@@ -98,6 +115,20 @@ static inline void load_mm_ldt(struct mm_struct *mm)
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (!(static_cpu_has(X86_FEATURE_PCIDUDEREF))) {
++              unsigned int i;
++              pgd_t *pgd;
++
++              pax_open_kernel();
++              pgd = get_cpu_pgd(smp_processor_id(), kernel);
++              for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++                      set_pgd_batched(pgd+i, native_make_pgd(0));
++              pax_close_kernel();
++      }
++#endif
++
+ #ifdef CONFIG_SMP
+       if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+index e3b7819..ba128ec 100644
+--- a/arch/x86/include/asm/module.h
++++ b/arch/x86/include/asm/module.h
+@@ -5,6 +5,7 @@
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M486
+ #define MODULE_PROC_FAMILY "486 "
+ #elif defined CONFIG_M586
+@@ -57,8 +58,26 @@
+ #error unknown processor family
+ #endif
+-#ifdef CONFIG_X86_32
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
++#else
++#define MODULE_PAX_KERNEXEC ""
+ #endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#ifdef CONFIG_PAX_RAP
++#define MODULE_PAX_RAP "RAP "
++#else
++#define MODULE_PAX_RAP ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_RAP
++
+ #endif /* _ASM_X86_MODULE_H */
+diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
+index 5f2fc44..106caa6 100644
+--- a/arch/x86/include/asm/nmi.h
++++ b/arch/x86/include/asm/nmi.h
+@@ -36,26 +36,35 @@ enum {
+ typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
++struct nmiaction;
++
++struct nmiwork {
++      const struct nmiaction  *action;
++      u64                     max_duration;
++      struct irq_work         irq_work;
++};
++
+ struct nmiaction {
+       struct list_head        list;
+       nmi_handler_t           handler;
+-      u64                     max_duration;
+-      struct irq_work         irq_work;
+       unsigned long           flags;
+       const char              *name;
+-};
++      struct nmiwork          *work;
++} __do_const;
+ #define register_nmi_handler(t, fn, fg, n, init...)   \
+ ({                                                    \
+-      static struct nmiaction init fn##_na = {        \
++      static struct nmiwork fn##_nw;                  \
++      static const struct nmiaction init fn##_na = {  \
+               .handler = (fn),                        \
+               .name = (n),                            \
+               .flags = (fg),                          \
++              .work = &fn##_nw,                       \
+       };                                              \
+       __register_nmi_handler((t), &fn##_na);          \
+ })
+-int __register_nmi_handler(unsigned int, struct nmiaction *);
++int __register_nmi_handler(unsigned int, const struct nmiaction *);
+ void unregister_nmi_handler(unsigned int, const char *);
+diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
+index cf8f619..bbcf5e6 100644
+--- a/arch/x86/include/asm/page.h
++++ b/arch/x86/include/asm/page.h
+@@ -58,6 +58,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ #define __va(x)                       ((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #endif
++#define __early_va(x)         ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
++
+ #define __boot_va(x)          __va(x)
+ #define __boot_pa(x)          __pa(x)
+@@ -65,11 +67,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+  * virt_to_page(kaddr) returns a valid pointer if and only if
+  * virt_addr_valid(kaddr) returns true.
+  */
+-#define virt_to_page(kaddr)   pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+ #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+ extern bool __virt_addr_valid(unsigned long kaddr);
+ #define virt_addr_valid(kaddr)        __virt_addr_valid((unsigned long) (kaddr))
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++#define virt_to_page(kaddr)   \
++      ({ \
++              const void *__kaddr = (const void *)(kaddr); \
++              BUG_ON(!virt_addr_valid(__kaddr)); \
++              pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
++      })
++#else
++#define virt_to_page(kaddr)   pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#endif
++
+ #endif        /* __ASSEMBLY__ */
+ #include <asm-generic/memory_model.h>
+diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
+index 904f528..b4d0d24 100644
+--- a/arch/x86/include/asm/page_32.h
++++ b/arch/x86/include/asm/page_32.h
+@@ -7,11 +7,17 @@
+ #define __phys_addr_nodebug(x)        ((x) - PAGE_OFFSET)
+ #ifdef CONFIG_DEBUG_VIRTUAL
+-extern unsigned long __phys_addr(unsigned long);
++extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
+ #else
+-#define __phys_addr(x)                __phys_addr_nodebug(x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
++{
++      return __phys_addr_nodebug(x);
++}
+ #endif
+-#define __phys_addr_symbol(x) __phys_addr(x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x)
++{
++      return __phys_addr(x);
++}
+ #define __phys_reloc_hide(x)  RELOC_HIDE((x), 0)
+ #ifdef CONFIG_FLATMEM
+diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
+index b3bebf9..cb419e7 100644
+--- a/arch/x86/include/asm/page_64.h
++++ b/arch/x86/include/asm/page_64.h
+@@ -7,9 +7,9 @@
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+-static inline unsigned long __phys_addr_nodebug(unsigned long x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
+ {
+       unsigned long y = x - __START_KERNEL_map;
+@@ -20,12 +20,14 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
+ }
+ #ifdef CONFIG_DEBUG_VIRTUAL
+-extern unsigned long __phys_addr(unsigned long);
+-extern unsigned long __phys_addr_symbol(unsigned long);
++extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
++extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
+ #else
+ #define __phys_addr(x)                __phys_addr_nodebug(x)
+-#define __phys_addr_symbol(x) \
+-      ((unsigned long)(x) - __START_KERNEL_map + phys_base)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x)
++{
++      return x - __START_KERNEL_map + phys_base;
++}
+ #endif
+ #define __phys_reloc_hide(x)  (x)
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 2970d22..fce32bd 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -509,7 +509,7 @@ static inline pmd_t __pmd(pmdval_t val)
+       return (pmd_t) { ret };
+ }
+-static inline pmdval_t pmd_val(pmd_t pmd)
++static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
+ {
+       pmdval_t ret;
+@@ -575,6 +575,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+                           val);
+ }
++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
++      pgdval_t val = native_pgd_val(pgd);
++
++      if (sizeof(pgdval_t) > sizeof(long))
++              PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
++                          val, (u64)val >> 32);
++      else
++              PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
++                          val);
++}
++
+ static inline void pgd_clear(pgd_t *pgdp)
+ {
+       set_pgd(pgdp, __pgd(0));
+@@ -659,6 +671,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+       pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++      return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++      return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+ #ifdef CONFIG_QUEUED_SPINLOCKS
+@@ -886,7 +913,7 @@ extern void default_banner(void);
+ #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr)   *%cs:addr
++#define PARA_INDIRECT(addr)   *%ss:addr
+ #endif
+ #define INTERRUPT_RETURN                                              \
+@@ -944,6 +971,21 @@ extern void default_banner(void);
+       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
+                 CLBR_NONE,                                            \
+                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
++
++#define GET_CR0_INTO_RDI                              \
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++      mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0                              \
++      call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI                              \
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++      mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3                              \
++      call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif        /* CONFIG_X86_32 */
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 7fa9e77..aa09e68 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -83,7 +83,7 @@ struct pv_init_ops {
+        */
+       unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+                         unsigned long addr, unsigned len);
+-};
++} __no_const __no_randomize_layout;
+ struct pv_lazy_ops {
+@@ -91,12 +91,12 @@ struct pv_lazy_ops {
+       void (*enter)(void);
+       void (*leave)(void);
+       void (*flush)(void);
+-};
++} __no_randomize_layout;
+ struct pv_time_ops {
+       unsigned long long (*sched_clock)(void);
+       unsigned long long (*steal_clock)(int cpu);
+-};
++} __no_const __no_randomize_layout;
+ struct pv_cpu_ops {
+       /* hooks for various privileged instructions */
+@@ -178,7 +178,7 @@ struct pv_cpu_ops {
+       void (*start_context_switch)(struct task_struct *prev);
+       void (*end_context_switch)(struct task_struct *next);
+-};
++} __no_const __no_randomize_layout;
+ struct pv_irq_ops {
+       /*
+@@ -201,7 +201,7 @@ struct pv_irq_ops {
+ #ifdef CONFIG_X86_64
+       void (*adjust_exception_frame)(void);
+ #endif
+-};
++} __no_randomize_layout;
+ struct pv_mmu_ops {
+       unsigned long (*read_cr2)(void);
+@@ -285,6 +285,7 @@ struct pv_mmu_ops {
+       struct paravirt_callee_save make_pud;
+       void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
++      void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
+ #endif        /* CONFIG_PGTABLE_LEVELS == 4 */
+ #endif        /* CONFIG_PGTABLE_LEVELS >= 3 */
+@@ -296,7 +297,13 @@ struct pv_mmu_ops {
+          an mfn.  We can tell which is which from the index. */
+       void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+                          phys_addr_t phys, pgprot_t flags);
+-};
++
++#ifdef CONFIG_PAX_KERNEXEC
++      unsigned long (*pax_open_kernel)(void);
++      unsigned long (*pax_close_kernel)(void);
++#endif
++
++} __no_randomize_layout;
+ struct arch_spinlock;
+ #ifdef CONFIG_SMP
+@@ -318,11 +325,14 @@ struct pv_lock_ops {
+       struct paravirt_callee_save lock_spinning;
+       void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+ #endif /* !CONFIG_QUEUED_SPINLOCKS */
+-};
++} __no_randomize_layout;
+ /* This contains all the paravirt structures: we get a convenient
+  * number for each function using the offset which we use to indicate
+- * what to patch. */
++ * what to patch.
++ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
++ */
++
+ struct paravirt_patch_template {
+       struct pv_init_ops pv_init_ops;
+       struct pv_time_ops pv_time_ops;
+@@ -330,7 +340,7 @@ struct paravirt_patch_template {
+       struct pv_irq_ops pv_irq_ops;
+       struct pv_mmu_ops pv_mmu_ops;
+       struct pv_lock_ops pv_lock_ops;
+-};
++} __no_randomize_layout;
+ extern struct pv_info pv_info;
+ extern struct pv_init_ops pv_init_ops;
+diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
+index b6d4259..da6324e 100644
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
+                                      pmd_t *pmd, pte_t *pte)
+ {
+       paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++      set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++                                     pmd_t *pmd, pte_t *pte)
++{
++      paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+       set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+@@ -112,12 +119,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ #ifdef CONFIG_X86_PAE
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
++{
++      pud_populate(mm, pudp, pmd);
++}
+ #else /* !CONFIG_X86_PAE */
+ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+       set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++      paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
++      set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
++}
+ #endif        /* CONFIG_X86_PAE */
+ #if CONFIG_PGTABLE_LEVELS > 3
+@@ -127,6 +144,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+       set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ }
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++      paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
++      set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+       gfp_t gfp = GFP_KERNEL_ACCOUNT;
+diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
+index fd74a11..35fd5af 100644
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++      pax_open_kernel();
+       *pmdp = pmd;
++      pax_close_kernel();
+ }
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index cdaa58c..ae30f0d 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++      pax_open_kernel();
+       set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++      pax_close_kernel();
+ }
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++      pax_open_kernel();
+       set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++      pax_close_kernel();
+ }
+ /*
+@@ -116,9 +120,12 @@ static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ static inline void native_pmd_clear(pmd_t *pmd)
+ {
+       u32 *tmp = (u32 *)pmd;
++
++      pax_open_kernel();
+       *tmp = 0;
+       smp_wmb();
+       *(tmp + 1) = 0;
++      pax_close_kernel();
+ }
+ static inline void pud_clear(pud_t *pudp)
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 437feb4..a4b2570 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -54,6 +54,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+ #ifndef __PAGETABLE_PUD_FOLDED
+ #define set_pgd(pgdp, pgd)            native_set_pgd(pgdp, pgd)
++#define set_pgd_batched(pgdp, pgd)    native_set_pgd_batched(pgdp, pgd)
+ #define pgd_clear(pgd)                        native_pgd_clear(pgd)
+ #endif
+@@ -88,12 +89,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+ #define arch_end_context_switch(prev) do {} while(0)
++#define pax_open_kernel()     native_pax_open_kernel()
++#define pax_close_kernel()    native_pax_close_kernel()
+ #endif        /* CONFIG_PARAVIRT */
++#define  __HAVE_ARCH_PAX_OPEN_KERNEL
++#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++      unsigned long cr0;
++
++      preempt_disable();
++      barrier();
++      cr0 = read_cr0() ^ X86_CR0_WP;
++      BUG_ON(cr0 & X86_CR0_WP);
++      write_cr0(cr0);
++      barrier();
++      return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++      unsigned long cr0;
++
++      barrier();
++      cr0 = read_cr0() ^ X86_CR0_WP;
++      BUG_ON(!(cr0 & X86_CR0_WP));
++      write_cr0(cr0);
++      barrier();
++      preempt_enable_no_resched();
++      return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+  * The following only work if pte_present() is true.
+  * Undefined behaviour if not..
+  */
++static inline int pte_user(pte_t pte)
++{
++      return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+       return pte_flags(pte) & _PAGE_DIRTY;
+@@ -168,6 +210,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+       return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
++static inline unsigned long pgd_pfn(pgd_t pgd)
++{
++      return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+ static inline int pmd_large(pmd_t pte)
+@@ -224,9 +271,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+       return pte_clear_flags(pte, _PAGE_RW);
+ }
++static inline pte_t pte_mkread(pte_t pte)
++{
++      return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+-      return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++      if (__supported_pte_mask & _PAGE_NX)
++              return pte_clear_flags(pte, _PAGE_NX);
++      else
++#endif
++              return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++      if (__supported_pte_mask & _PAGE_NX)
++              return pte_set_flags(pte, _PAGE_NX);
++      else
++#endif
++              return pte_clear_flags(pte, _PAGE_USER);
+ }
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -431,7 +498,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+ #define canon_pgprot(p) __pgprot(massage_pgprot(p))
+-static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
++static inline int is_new_memtype_allowed(u64 paddr, u64 size,
+                                        enum page_cache_mode pcm,
+                                        enum page_cache_mode new_pcm)
+ {
+@@ -474,6 +541,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+ #endif
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
++enum cpu_pgd_type {kernel = 0, user = 1};
++static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
++{
++      return cpu_pgd[cpu][type];
++}
++#endif
++
+ #include <linux/mm_types.h>
+ #include <linux/mmdebug.h>
+ #include <linux/log2.h>
+@@ -675,7 +752,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+  * Currently stuck as a macro due to indirect forward reference to
+  * linux/mmzone.h's __section_mem_map_addr() definition:
+  */
+-#define pgd_page(pgd)         pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
++#define pgd_page(pgd)         pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
+ /* to find an entry in a page-table-directory. */
+ static inline unsigned long pud_index(unsigned long address)
+@@ -690,7 +767,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+ static inline int pgd_bad(pgd_t pgd)
+ {
+-      return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++      return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+ static inline int pgd_none(pgd_t pgd)
+@@ -719,7 +796,12 @@ static inline int pgd_none(pgd_t pgd)
+  * pgd_offset() returns a (pgd_t *)
+  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+  */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
++#endif
++
+ /*
+  * a shortcut which implies the use of the kernel's pgd, instead
+  * of a process's
+@@ -730,6 +812,25 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY   pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS               (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS         KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS         (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#ifdef __ASSEMBLY__
++#define pax_user_shadow_base  pax_user_shadow_base(%rip)
++#else
++extern unsigned long pax_user_shadow_base;
++extern pgdval_t clone_pgd_mask;
++#endif
++#else
++#define pax_user_shadow_base  (0UL)
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+ extern int direct_gbpages;
+@@ -901,11 +1002,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+  * dst and src can be on the same page, but the range must not overlap,
+  * and must not cross a page boundary.
+  */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+-       memcpy(dst, src, count * sizeof(pgd_t));
++      pax_open_kernel();
++      while (count--)
++              *dst++ = *src++;
++      pax_close_kernel();
+ }
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
++#endif
++
+ #define PTE_SHIFT ilog2(PTRS_PER_PTE)
+ static inline int page_level_shift(enum pg_level level)
+ {
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index b6c0b40..3535d47 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -45,6 +42,12 @@ void paging_init(void);
+ # include <asm/pgtable-2level.h>
+ #endif
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address)                                  \
+       ((pte_t *)kmap_atomic(pmd_page(*(dir))) +               \
+@@ -59,12 +62,17 @@ void paging_init(void);
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr)         \
+ do {                                          \
++      pax_open_kernel();                      \
+       pte_clear(&init_mm, (vaddr), (ptep));   \
++      pax_close_kernel();                     \
+       __flush_tlb_one((vaddr));               \
+ } while (0)
+ #endif /* !__ASSEMBLY__ */
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+  * kern_addr_valid() is (1) for FLATMEM and (0) for
+  * SPARSEMEM and DISCONTIGMEM
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index 9fb2f2b..8e18c70 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -8,7 +8,7 @@
+  */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE     (1UL << PMD_SHIFT)
++# define PMD_SIZE     (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK     (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,28 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ # define VMALLOC_END  (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++static inline unsigned long __intentional_overflow(-1) ktla_ktva(unsigned long addr)
++{
++      return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET;
++
++}
++static inline unsigned long __intentional_overflow(-1) ktva_ktla(unsigned long addr)
++{
++      return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET;
++}
++#endif
++#else
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END   VMALLOC_END
+ #define MODULES_LEN   (MODULES_VADDR - MODULES_END)
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 1cc82ec..ba29fd8 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -16,11 +16,17 @@
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_start_pgt[4][512];
++extern pud_t level3_vmalloc_end_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pte_t level1_fixmap_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[2][512];
++extern pte_t level1_modules_pgt[4][512];
++extern pte_t level1_fixmap_pgt[3][512];
++extern pte_t level1_vsyscall_pgt[512];
++extern pgd_t init_level4_pgt[512];
+ #define swapper_pg_dir init_level4_pgt
+@@ -62,7 +68,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++      pax_open_kernel();
+       *pmdp = pmd;
++      pax_close_kernel();
+ }
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -98,7 +106,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++      pax_open_kernel();
+       *pudp = pud;
++      pax_close_kernel();
+ }
+ static inline void native_pud_clear(pud_t *pud)
+@@ -108,6 +118,13 @@ static inline void native_pud_clear(pud_t *pud)
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++      pax_open_kernel();
++      *pgdp = pgd;
++      pax_close_kernel();
++}
++
++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
+       *pgdp = pgd;
+ }
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 6fdef9e..7cda9d5 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -67,11 +67,16 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+ #define MODULES_END      _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
+ #define ESPFIX_PGD_ENTRY _AC(-2, UL)
+ #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
+ #define EFI_VA_START   ( -4 * (_AC(1, UL) << 30))
+ #define EFI_VA_END     (-68 * (_AC(1, UL) << 30))
++#define ktla_ktva(addr)               (addr)
++#define ktva_ktla(addr)               (addr)
++
+ #define EARLY_DYNAMIC_PAGE_TABLES     64
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index f1218f5..b0cafcd 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -112,10 +112,14 @@
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX      (_AT(pteval_t, 1) << _PAGE_BIT_NX)
++#ifdef CONFIG_PAX_SEGMEXEC
++#define _PAGE_DEVMAP  (_AT(pteval_t, 0))
++#else
+ #define _PAGE_DEVMAP  (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
+ #define __HAVE_ARCH_PTE_DEVMAP
++#endif
+ #else
+-#define _PAGE_NX      (_AT(pteval_t, 0))
++#define _PAGE_NX      (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
+ #define _PAGE_DEVMAP  (_AT(pteval_t, 0))
+ #endif
+@@ -176,6 +180,9 @@ enum page_cache_mode {
+ #define PAGE_READONLY_EXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
+                                        _PAGE_ACCESSED)
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC                                            \
+       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL         (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -183,7 +190,7 @@ enum page_cache_mode {
+ #define __PAGE_KERNEL_RO              (__PAGE_KERNEL & ~_PAGE_RW)
+ #define __PAGE_KERNEL_RX              (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+ #define __PAGE_KERNEL_NOCACHE         (__PAGE_KERNEL | _PAGE_NOCACHE)
+-#define __PAGE_KERNEL_VSYSCALL                (__PAGE_KERNEL_RX | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL                (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR            (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_LARGE           (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC      (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -229,7 +236,7 @@ enum page_cache_mode {
+ #ifdef CONFIG_X86_64
+ #define __PAGE_KERNEL_IDENT_LARGE_EXEC        __PAGE_KERNEL_LARGE_EXEC
+ #else
+-#define PTE_IDENT_ATTR         0x003          /* PRESENT+RW */
++#define PTE_IDENT_ATTR         0x063          /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PDE_IDENT_ATTR         0x063          /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR         0x001          /* PRESENT (no other attributes) */
+ #endif
+@@ -271,7 +278,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+ {
+       return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
++#if CONFIG_PGTABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if CONFIG_PGTABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if CONFIG_PGTABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+@@ -285,8 +302,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+       return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+       return native_pgd_val(pud.pgd);
+@@ -306,8 +321,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+       return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+       return native_pgd_val(pmd.pud.pgd);
+@@ -424,7 +437,6 @@ typedef struct page *pgtable_t;
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+ #define pgprot_writecombine   pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
+index 643eba4..0dbfcf5 100644
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -38,7 +38,7 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
+        * fault) we would have already reported a general protection fault
+        * before the WARN+BUG.
+        */
+-      rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
++      rem = __copy_from_user_inatomic_nocache(dst, (void __force_user *) src, n);
+       if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
+                               __func__, dst, src, rem))
+               BUG();
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 17f2186..f394307 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val)
+  */
+ static __always_inline bool __preempt_count_dec_and_test(void)
+ {
+-      GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
++      GEN_UNARY_RMWcc("decl", __preempt_count, -4, __percpu_arg(0), e);
+ }
+ /*
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 63def95..3d8c203 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -135,7 +135,7 @@ struct cpuinfo_x86 {
+       /* Index into per_cpu list: */
+       u16                     cpu_index;
+       u32                     microcode;
+-};
++} __randomize_layout;
+ #define X86_VENDOR_INTEL      0
+ #define X86_VENDOR_CYRIX      1
+@@ -205,9 +205,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
+           : "memory");
+ }
++/* invpcid (%rdx),%rax */
++#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
++
++#define INVPCID_SINGLE_ADDRESS        0UL
++#define INVPCID_SINGLE_CONTEXT        1UL
++#define INVPCID_ALL_GLOBAL    2UL
++#define INVPCID_ALL_NONGLOBAL 3UL
++
++#define PCID_KERNEL           0UL
++#define PCID_USER             1UL
++#define PCID_NOFLUSH          (1UL << 63)
++
+ static inline void load_cr3(pgd_t *pgdir)
+ {
+-      write_cr3(__pa(pgdir));
++      write_cr3(__pa(pgdir) | PCID_KERNEL);
+ }
+ #ifdef CONFIG_X86_32
+@@ -307,11 +319,9 @@ struct tss_struct {
+ } ____cacheline_aligned;
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
++extern struct tss_struct cpu_tss[NR_CPUS];
+-#ifdef CONFIG_X86_32
+ DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+-#endif
+ /*
+  * Save the original ist values for checking stack pointers during debugging
+@@ -388,6 +398,7 @@ struct thread_struct {
+       unsigned short          ds;
+       unsigned short          fsindex;
+       unsigned short          gsindex;
++      unsigned short          ss;
+ #endif
+ #ifdef CONFIG_X86_32
+       unsigned long           ip;
+@@ -404,6 +415,9 @@ struct thread_struct {
+       unsigned long gs;
+ #endif
++      /* Floating point and extended processor state */
++      struct fpu              fpu;
++
+       /* Save middle states of ptrace breakpoints */
+       struct perf_event       *ptrace_bps[HBP_NUM];
+       /* Debug status used for traps, single steps, etc... */
+@@ -424,18 +438,9 @@ struct thread_struct {
+       /* Max allowed port in the bitmap, in bytes: */
+       unsigned                io_bitmap_max;
+-      mm_segment_t            addr_limit;
+-
+       unsigned int            sig_on_uaccess_err:1;
+       unsigned int            uaccess_err:1;  /* uaccess failed */
+-
+-      /* Floating point and extended processor state */
+-      struct fpu              fpu;
+-      /*
+-       * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
+-       * the end.
+-       */
+-};
++} __randomize_layout;
+ /*
+  * Set IOPL bits in EFLAGS from given mask
+@@ -478,12 +483,8 @@ static inline void native_swapgs(void)
+ static inline unsigned long current_top_of_stack(void)
+ {
+-#ifdef CONFIG_X86_64
+-      return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
+-#else
+       /* sp0 on x86_32 is special in and around vm86 mode. */
+       return this_cpu_read_stable(cpu_current_top_of_stack);
+-#endif
+ }
+ #ifdef CONFIG_PARAVIRT
+@@ -708,20 +709,29 @@ static inline void spin_lock_prefetch(const void *x)
+ #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
+                          TOP_OF_KERNEL_STACK_PADDING)
++extern union fpregs_state init_fpregs_state;
++
+ #ifdef CONFIG_X86_32
+ /*
+  * User space process size: 3GB (default).
+  */
+ #define TASK_SIZE             PAGE_OFFSET
+ #define TASK_SIZE_MAX         TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE    (TASK_SIZE / 2)
++#define STACK_TOP             ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP             TASK_SIZE
+-#define STACK_TOP_MAX         STACK_TOP
++#endif
++
++#define STACK_TOP_MAX         TASK_SIZE
+ #define INIT_THREAD  {                                                          \
+       .sp0                    = TOP_OF_INIT_STACK,                      \
+       .sysenter_cs            = __KERNEL_CS,                            \
+       .io_bitmap_ptr          = NULL,                                   \
+-      .addr_limit             = KERNEL_DS,                              \
++      .fpu.state              = &init_fpregs_state,                     \
+ }
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -736,12 +746,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+  * "struct pt_regs" is possible, but they may contain the
+  * completely wrong values.
+  */
+-#define task_pt_regs(task) \
+-({                                                                    \
+-      unsigned long __ptr = (unsigned long)task_stack_page(task);     \
+-      __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
+-      ((struct pt_regs *)__ptr) - 1;                                  \
+-})
++#define task_pt_regs(tsk)     ((struct pt_regs *)(tsk)->thread.sp0 - 1)
+ #define KSTK_ESP(task)                (task_pt_regs(task)->sp)
+@@ -755,13 +760,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+  * particular problem by preventing anything from being mapped
+  * at the maximum canonical address.
+  */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+ /* This decides where the kernel will search for a free chunk of vm
+  * space during mmap's.
+  */
+ #define IA32_PAGE_OFFSET      ((current->personality & ADDR_LIMIT_3GB) ? \
+-                                      0xc0000000 : 0xFFFFe000)
++                                      0xc0000000 : 0xFFFFf000)
+ #define TASK_SIZE             (test_thread_flag(TIF_ADDR32) ? \
+                                       IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -773,7 +778,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define INIT_THREAD  {                                                \
+       .sp0                    = TOP_OF_INIT_STACK,            \
+-      .addr_limit             = KERNEL_DS,                    \
++      .fpu.state              = &init_fpregs_state,           \
+ }
+ /*
+@@ -796,6 +801,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+  */
+ #define TASK_UNMAPPED_BASE    (PAGE_ALIGN(TASK_SIZE / 3))
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE   (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task)                (task_pt_regs(task)->ip)
+ /* Get/set a process' ability to use the timestamp counter instruction */
+@@ -841,7 +850,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+       return 0;
+ }
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+ void default_idle(void);
+@@ -851,6 +860,6 @@ bool xen_set_default_idle(void);
+ #define xen_set_default_idle 0
+ #endif
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+ void df_debug(struct pt_regs *regs, long error_code);
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 2b5d686..8693ed0 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -118,15 +118,16 @@ static inline int v8086_mode(struct pt_regs *regs)
+ #ifdef CONFIG_X86_64
+ static inline bool user_64bit_mode(struct pt_regs *regs)
+ {
++      unsigned long cs = regs->cs & 0xffff;
+ #ifndef CONFIG_PARAVIRT
+       /*
+        * On non-paravirt systems, this is the only long mode CPL 3
+        * selector.  We do not allow long mode selectors in the LDT.
+        */
+-      return regs->cs == __USER_CS;
++      return cs == __USER_CS;
+ #else
+       /* Headers are too twisted for this to go in paravirt.h. */
+-      return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
++      return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
+ #endif
+ }
+@@ -173,9 +174,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
+        * Traps from the kernel do not save sp and ss.
+        * Use the helper function to retrieve sp.
+        */
+-      if (offset == offsetof(struct pt_regs, sp) &&
+-          regs->cs == __KERNEL_CS)
+-              return kernel_stack_pointer(regs);
++      if (offset == offsetof(struct pt_regs, sp)) {
++              unsigned long cs = regs->cs & 0xffff;
++              if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
++                      return kernel_stack_pointer(regs);
++      }
+ #endif
+       return *(unsigned long *)((unsigned long)regs + offset);
+ }
+diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
+index b2988c0..421f625 100644
+--- a/arch/x86/include/asm/realmode.h
++++ b/arch/x86/include/asm/realmode.h
+@@ -22,16 +22,14 @@ struct real_mode_header {
+ #endif
+       /* APM/BIOS reboot */
+       u32     machine_real_restart_asm;
+-#ifdef CONFIG_X86_64
+       u32     machine_real_restart_seg;
+-#endif
+ };
+ /* This must match data at trampoline_32/64.S */
+ struct trampoline_header {
+ #ifdef CONFIG_X86_32
+       u32 start;
+-      u16 gdt_pad;
++      u16 boot_cs;
+       u16 gdt_limit;
+       u32 gdt_base;
+ #else
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 2cb1cc2..787d524 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -6,13 +6,13 @@
+ struct pt_regs;
+ struct machine_ops {
+-      void (*restart)(char *cmd);
+-      void (*halt)(void);
+-      void (*power_off)(void);
++      void (* __noreturn restart)(char *cmd);
++      void (* __noreturn halt)(void);
++      void (* __noreturn power_off)(void);
+       void (*shutdown)(void);
+       void (*crash_shutdown)(struct pt_regs *);
+-      void (*emergency_restart)(void);
+-};
++      void (* __noreturn emergency_restart)(void);
++} __no_const;
+ extern struct machine_ops machine_ops;
+diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
+index 661dd30..e804f84 100644
+--- a/arch/x86/include/asm/rmwcc.h
++++ b/arch/x86/include/asm/rmwcc.h
+@@ -5,7 +5,19 @@
+ /* Use asm goto */
+-#define __GEN_RMWcc(fullop, var, cc, ...)                             \
++#define __GEN_RMWcc(fullop, var, size, cc, ...)                               \
++do {                                                                  \
++      asm_volatile_goto (fullop                                       \
++                      "\n\t"__PAX_REFCOUNT(size)                      \
++                      ";j" #cc " %l[cc_label]"                        \
++                      : : [counter] "m" (var), ## __VA_ARGS__         \
++                      : "memory", "cc", "cx" : cc_label);             \
++      return 0;                                                       \
++cc_label:                                                             \
++      return 1;                                                       \
++} while (0)
++
++#define __GEN_RMWcc_unchecked(fullop, var, cc, ...)                   \
+ do {                                                                  \
+       asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"             \
+                       : : "m" (var), ## __VA_ARGS__                   \
+@@ -15,17 +27,34 @@ cc_label:                                                          \
+       return 1;                                                       \
+ } while (0)
+-#define GEN_UNARY_RMWcc(op, var, arg0, cc)                            \
+-      __GEN_RMWcc(op " " arg0, var, cc)
++#define GEN_UNARY_RMWcc(op, var, size, arg0, cc)                      \
++      __GEN_RMWcc(op " " arg0, var, size, cc)
+-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                        \
+-      __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
++#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc)                  \
++      __GEN_RMWcc_unchecked(op " " arg0, var, cc)
++
++#define GEN_BINARY_RMWcc(op, var, size, vcon, val, arg0, cc)          \
++      __GEN_RMWcc(op " %1, " arg0, var, size, cc, vcon (val))
++
++#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc)      \
++      __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
+ #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+ /* Use flags output or a set instruction */
+-#define __GEN_RMWcc(fullop, var, cc, ...)                             \
++#define __GEN_RMWcc(fullop, var, size, cc, ...)                               \
++do {                                                                  \
++      bool c;                                                         \
++      asm volatile (fullop                                            \
++                      "\n\t"__PAX_REFCOUNT(size)                      \
++                      ";" CC_SET(cc)                                  \
++                      : [counter] "+m" (var), CC_OUT(cc) (c)          \
++                      : __VA_ARGS__ : "memory", "cc", "cx");          \
++      return c != 0;                                                  \
++} while (0)
++
++#define __GEN_RMWcc_unchecked(fullop, var, cc, ...)                   \
+ do {                                                                  \
+       bool c;                                                         \
+       asm volatile (fullop ";" CC_SET(cc)                             \
+@@ -34,11 +63,17 @@ do {                                                                       \
+       return c;                                                       \
+ } while (0)
+-#define GEN_UNARY_RMWcc(op, var, arg0, cc)                            \
+-      __GEN_RMWcc(op " " arg0, var, cc)
++#define GEN_UNARY_RMWcc(op, var, size, arg0, cc)                      \
++      __GEN_RMWcc(op " " arg0, var, size, cc)
+-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                        \
+-      __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
++#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc)                  \
++      __GEN_RMWcc_unchecked(op " " arg0, var, cc)
++
++#define GEN_BINARY_RMWcc(op, var, size, vcon, val, arg0, cc)          \
++      __GEN_RMWcc(op " %2, " arg0, var, size, cc, vcon (val))
++
++#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc)      \
++      __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
+ #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index 8dbc762..5ff77d9 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -64,14 +64,15 @@ static inline void __down_read(struct rw_semaphore *sem)
+ {
+       asm volatile("# beginning down_read\n\t"
+                    LOCK_PREFIX _ASM_INC "(%1)\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
+                    /* adds 0x00000001 */
+                    "  jns        1f\n"
+                    "  call call_rwsem_down_read_failed\n"
+                    "1:\n\t"
+                    "# ending down_read\n\t"
+-                   : "+m" (sem->count)
++                   : [counter] "+m" (sem->count)
+                    : "a" (sem)
+-                   : "memory", "cc");
++                   : "memory", "cc", "cx");
+ }
+ /*
+@@ -85,14 +86,15 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
+                    "1:\n\t"
+                    "  mov          %1,%2\n\t"
+                    "  add          %3,%2\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
+                    "  jle          2f\n\t"
+                    LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
+                    "  jnz          1b\n\t"
+                    "2:\n\t"
+                    "# ending __down_read_trylock\n\t"
+-                   : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
++                   : [counter] "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+                    : "i" (RWSEM_ACTIVE_READ_BIAS)
+-                   : "memory", "cc");
++                   : "memory", "cc", "cx");
+       return result >= 0;
+ }
+@@ -105,6 +107,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
+       struct rw_semaphore* ret;                       \
+       asm volatile("# beginning down_write\n\t"       \
+                    LOCK_PREFIX "  xadd      %1,(%3)\n\t"      \
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)\
+                    /* adds 0xffff0001, returns the old value */ \
+                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
+                    /* was the active mask 0 before? */\
+@@ -112,9 +115,9 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
+                    "  call " slow_path "\n"           \
+                    "1:\n"                             \
+                    "# ending down_write"              \
+-                   : "+m" (sem->count), "=d" (tmp), "=a" (ret)        \
++                   : [counter] "+m" (sem->count), "=d" (tmp), "=a" (ret)\
+                    : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
+-                   : "memory", "cc");                 \
++                   : "memory", "cc", "cx");           \
+       ret;                                            \
+ })
+@@ -146,15 +149,16 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem)
+                    "  jnz          2f\n\t"
+                    "  mov          %1,%2\n\t"
+                    "  add          %4,%2\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
+                    LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
+                    "  jnz          1b\n\t"
+                    "2:\n\t"
+                    CC_SET(e)
+                    "# ending __down_write_trylock\n\t"
+-                   : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
++                   : [counter] "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
+                      CC_OUT(e) (result)
+                    : "er" (RWSEM_ACTIVE_WRITE_BIAS)
+-                   : "memory", "cc");
++                   : "memory", "cc", "cx");
+       return result;
+ }
+@@ -166,14 +170,15 @@ static inline void __up_read(struct rw_semaphore *sem)
+       long tmp;
+       asm volatile("# beginning __up_read\n\t"
+                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8)
+                    /* subtracts 1, returns the old value */
+                    "  jns        1f\n\t"
+                    "  call call_rwsem_wake\n" /* expects old value in %edx */
+                    "1:\n"
+                    "# ending __up_read\n"
+-                   : "+m" (sem->count), "=d" (tmp)
++                   : [counter] "+m" (sem->count), "=d" (tmp)
+                    : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
+-                   : "memory", "cc");
++                   : "memory", "cc", "cx");
+ }
+ /*
+@@ -184,14 +189,15 @@ static inline void __up_write(struct rw_semaphore *sem)
+       long tmp;
+       asm volatile("# beginning __up_write\n\t"
+                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
++                   PAX_REFCOUNT_UNDERFLOW(BITS_PER_LONG/8)
+                    /* subtracts 0xffff0001, returns the old value */
+                    "  jns        1f\n\t"
+                    "  call call_rwsem_wake\n" /* expects old value in %edx */
+                    "1:\n\t"
+                    "# ending __up_write\n"
+-                   : "+m" (sem->count), "=d" (tmp)
++                   : [counter] "+m" (sem->count), "=d" (tmp)
+                    : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+-                   : "memory", "cc");
++                   : "memory", "cc", "cx");
+ }
+ /*
+@@ -201,6 +207,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+       asm volatile("# beginning __downgrade_write\n\t"
+                    LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++                   PAX_REFCOUNT_OVERFLOW(BITS_PER_LONG/8)
+                    /*
+                     * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+                     *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -209,9 +216,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+                    "  call call_rwsem_downgrade_wake\n"
+                    "1:\n\t"
+                    "# ending __downgrade_write\n"
+-                   : "+m" (sem->count)
++                   : [counter] "+m" (sem->count)
+                    : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+-                   : "memory", "cc");
++                   : "memory", "cc", "cx");
+ }
+ #endif /* __KERNEL__ */
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index 1549caa0..aa9ebe1 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -83,14 +83,20 @@
+  *  26 - ESPFIX small SS
+  *  27 - per-cpu                      [ offset to per-cpu data area ]
+  *  28 - stack_canary-20              [ for stack protector ]         <=== cacheline #8
+- *  29 - unused
+- *  30 - unused
++ *  29 - PCI BIOS CS
++ *  30 - PCI BIOS DS
+  *  31 - TSS for double fault handler
+  */
++#define GDT_ENTRY_KERNEXEC_EFI_CS     (1)
++#define GDT_ENTRY_KERNEXEC_EFI_DS     (2)
++#define __KERNEXEC_EFI_CS     (GDT_ENTRY_KERNEXEC_EFI_CS*8)
++#define __KERNEXEC_EFI_DS     (GDT_ENTRY_KERNEXEC_EFI_DS*8)
++
+ #define GDT_ENTRY_TLS_MIN             6
+ #define GDT_ENTRY_TLS_MAX             (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+ #define GDT_ENTRY_KERNEL_CS           12
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS  4
+ #define GDT_ENTRY_KERNEL_DS           13
+ #define GDT_ENTRY_DEFAULT_USER_CS     14
+ #define GDT_ENTRY_DEFAULT_USER_DS     15
+@@ -107,6 +113,12 @@
+ #define GDT_ENTRY_PERCPU              27
+ #define GDT_ENTRY_STACK_CANARY                28
++#define GDT_ENTRY_PCIBIOS_CS          29
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS          30
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS     31
+ /*
+@@ -119,6 +131,7 @@
+  */
+ #define __KERNEL_CS                   (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS          (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS                   (GDT_ENTRY_KERNEL_DS*8)
+ #define __USER_DS                     (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
+ #define __USER_CS                     (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
+@@ -130,7 +143,7 @@
+ #define PNP_CS16                      (GDT_ENTRY_PNPBIOS_CS16*8)
+ /* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */
+-#define SEGMENT_IS_PNP_CODE(x)                (((x) & 0xf4) == PNP_CS32)
++#define SEGMENT_IS_PNP_CODE(x)                (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+ /* data segment for BIOS: */
+ #define PNP_DS                                (GDT_ENTRY_PNPBIOS_DS*8)
+@@ -177,6 +190,8 @@
+ #define GDT_ENTRY_DEFAULT_USER_DS     5
+ #define GDT_ENTRY_DEFAULT_USER_CS     6
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS  7
++
+ /* Needs two entries */
+ #define GDT_ENTRY_TSS                 8
+ /* Needs two entries */
+@@ -188,10 +203,12 @@
+ /* Abused to load per CPU data from limit */
+ #define GDT_ENTRY_PER_CPU             15
++#define GDT_ENTRY_UDEREF_KERNEL_DS    16
++
+ /*
+  * Number of entries in the GDT table:
+  */
+-#define GDT_ENTRIES                   16
++#define GDT_ENTRIES                   17
+ /*
+  * Segment selector values corresponding to the above entries:
+@@ -201,7 +218,9 @@
+  */
+ #define __KERNEL32_CS                 (GDT_ENTRY_KERNEL32_CS*8)
+ #define __KERNEL_CS                   (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS          (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS                   (GDT_ENTRY_KERNEL_DS*8)
++#define __UDEREF_KERNEL_DS            (GDT_ENTRY_UDEREF_KERNEL_DS*8)
+ #define __USER32_CS                   (GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
+ #define __USER_DS                     (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
+ #define __USER32_DS                   __USER_DS
+diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
+index ac1d5da..6c4be50 100644
+--- a/arch/x86/include/asm/setup.h
++++ b/arch/x86/include/asm/setup.h
+@@ -61,6 +61,7 @@ static inline void x86_ce4100_early_setup(void) { }
+ #ifndef _SETUP
+ #include <asm/espfix.h>
++#include <asm/uaccess.h>
+ #include <linux/kernel.h>
+ /*
+@@ -76,7 +77,7 @@ static inline bool kaslr_enabled(void)
+ static inline unsigned long kaslr_offset(void)
+ {
+-      return (unsigned long)&_text - __START_KERNEL;
++      return ktla_ktva((unsigned long)&_text) - __START_KERNEL;
+ }
+ /*
+diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
+index db33330..fa80df3 100644
+--- a/arch/x86/include/asm/smap.h
++++ b/arch/x86/include/asm/smap.h
+@@ -25,6 +25,18 @@
+ #include <asm/alternative-asm.h>
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ASM_PAX_OPEN_USERLAND                                 \
++      ALTERNATIVE "", "call __pax_open_userland", X86_FEATURE_STRONGUDEREF
++
++#define ASM_PAX_CLOSE_USERLAND                                        \
++      ALTERNATIVE "", "call __pax_close_userland", X86_FEATURE_STRONGUDEREF
++
++#else
++#define ASM_PAX_OPEN_USERLAND
++#define ASM_PAX_CLOSE_USERLAND
++#endif
++
+ #ifdef CONFIG_X86_SMAP
+ #define ASM_CLAC \
+@@ -40,10 +52,44 @@
+ #endif /* CONFIG_X86_SMAP */
++#define ASM_USER_ACCESS_BEGIN ASM_PAX_OPEN_USERLAND; ASM_STAC
++#define ASM_USER_ACCESS_END   ASM_CLAC; ASM_PAX_CLOSE_USERLAND
++
+ #else /* __ASSEMBLY__ */
+ #include <asm/alternative.h>
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++extern void __pax_open_userland(void);
++static __always_inline unsigned long pax_open_userland(void)
++{
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      asm volatile(ALTERNATIVE("", "call %P[open]", X86_FEATURE_STRONGUDEREF)
++              :
++              : [open] "i" (__pax_open_userland)
++              : "memory", "rax");
++#endif
++
++      return 0;
++}
++
++extern void __pax_close_userland(void);
++static __always_inline unsigned long pax_close_userland(void)
++{
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      asm volatile(ALTERNATIVE("", "call %P[close]", X86_FEATURE_STRONGUDEREF)
++              :
++              : [close] "i" (__pax_close_userland)
++              : "memory", "rax");
++#endif
++
++      return 0;
++}
++
+ #ifdef CONFIG_X86_SMAP
+ static __always_inline void clac(void)
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index ebd0c16..ff7f35d 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -25,7 +25,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
+ /* cpus sharing the last level cache: */
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
+-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
++DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
+ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+ {
+@@ -59,7 +59,7 @@ struct smp_ops {
+       void (*send_call_func_ipi)(const struct cpumask *mask);
+       void (*send_call_func_single_ipi)(int cpu);
+-};
++} __no_const;
+ /* Globals due to paravirt */
+ extern void set_cpu_sibling_map(int cpu);
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 58505f0..bff3b5b 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -49,7 +49,7 @@
+  * head_32 for boot CPU and setup_per_cpu_areas() for others.
+  */
+ #define GDT_STACK_CANARY_INIT                                         \
+-      [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++      [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+ /*
+  * Initialize the stackprotector canary value.
+@@ -114,7 +114,7 @@ static inline void setup_stack_canary_segment(int cpu)
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+       asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
+index 0944218..2f17b1b 100644
+--- a/arch/x86/include/asm/stacktrace.h
++++ b/arch/x86/include/asm/stacktrace.h
+@@ -11,28 +11,20 @@
+ extern int kstack_depth_to_print;
+-struct thread_info;
++struct task_struct;
+ struct stacktrace_ops;
+-typedef unsigned long (*walk_stack_t)(struct task_struct *task,
+-                                    unsigned long *stack,
+-                                    unsigned long bp,
+-                                    const struct stacktrace_ops *ops,
+-                                    void *data,
+-                                    unsigned long *end,
+-                                    int *graph);
++typedef unsigned long walk_stack_t(struct task_struct *task,
++                                 void *stack_start,
++                                 unsigned long *stack,
++                                 unsigned long bp,
++                                 const struct stacktrace_ops *ops,
++                                 void *data,
++                                 unsigned long *end,
++                                 int *graph);
+-extern unsigned long
+-print_context_stack(struct task_struct *task,
+-                  unsigned long *stack, unsigned long bp,
+-                  const struct stacktrace_ops *ops, void *data,
+-                  unsigned long *end, int *graph);
+-
+-extern unsigned long
+-print_context_stack_bp(struct task_struct *task,
+-                     unsigned long *stack, unsigned long bp,
+-                     const struct stacktrace_ops *ops, void *data,
+-                     unsigned long *end, int *graph);
++extern walk_stack_t print_context_stack;
++extern walk_stack_t print_context_stack_bp;
+ /* Generic stack tracer with callbacks */
+@@ -40,7 +32,7 @@ struct stacktrace_ops {
+       int (*address)(void *data, unsigned long address, int reliable);
+       /* On negative return stop dumping */
+       int (*stack)(void *data, char *name);
+-      walk_stack_t    walk_stack;
++      walk_stack_t    *walk_stack;
+ };
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
+index 3d3e835..50b64b1 100644
+--- a/arch/x86/include/asm/string_32.h
++++ b/arch/x86/include/asm/string_32.h
+@@ -6,28 +6,28 @@
+ /* Let gcc decide whether to inline or use the out of line functions */
+ #define __HAVE_ARCH_STRCPY
+-extern char *strcpy(char *dest, const char *src);
++extern char *strcpy(char *dest, const char *src) __nocapture(2);
+ #define __HAVE_ARCH_STRNCPY
+-extern char *strncpy(char *dest, const char *src, size_t count);
++extern char *strncpy(char *dest, const char *src, size_t count) __nocapture(2);
+ #define __HAVE_ARCH_STRCAT
+-extern char *strcat(char *dest, const char *src);
++extern char *strcat(char *dest, const char *src) __nocapture(2);
+ #define __HAVE_ARCH_STRNCAT
+-extern char *strncat(char *dest, const char *src, size_t count);
++extern char *strncat(char *dest, const char *src, size_t count) __nocapture(2);
+ #define __HAVE_ARCH_STRCMP
+-extern int strcmp(const char *cs, const char *ct);
++extern int strcmp(const char *cs, const char *ct) __nocapture();
+ #define __HAVE_ARCH_STRNCMP
+-extern int strncmp(const char *cs, const char *ct, size_t count);
++extern int strncmp(const char *cs, const char *ct, size_t count) __nocapture(1, 2);
+ #define __HAVE_ARCH_STRCHR
+-extern char *strchr(const char *s, int c);
++extern char *strchr(const char *s, int c) __nocapture(-1);
+ #define __HAVE_ARCH_STRLEN
+-extern size_t strlen(const char *s);
++extern size_t strlen(const char *s) __nocapture(1);
+ static __always_inline void *__memcpy(void *to, const void *from, size_t n)
+ {
+@@ -197,12 +197,12 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
+ #endif
+ #define __HAVE_ARCH_MEMMOVE
+-void *memmove(void *dest, const void *src, size_t n);
++void *memmove(void *dest, const void *src, size_t n) __nocapture(2);
+ #define memcmp __builtin_memcmp
+ #define __HAVE_ARCH_MEMCHR
+-extern void *memchr(const void *cs, int c, size_t count);
++extern void *memchr(const void *cs, int c, size_t count) __nocapture(-1);
+ static inline void *__memset_generic(void *s, char c, size_t count)
+ {
+@@ -243,11 +243,11 @@ void *__constant_c_memset(void *s, unsigned long c, size_t count)
+ /* Added by Gertjan van Wingerde to make minix and sysv module work */
+ #define __HAVE_ARCH_STRNLEN
+-extern size_t strnlen(const char *s, size_t count);
++extern size_t strnlen(const char *s, size_t count) __nocapture(1);
+ /* end of additional stuff */
+ #define __HAVE_ARCH_STRSTR
+-extern char *strstr(const char *cs, const char *ct);
++extern char *strstr(const char *cs, const char *ct) __nocapture(-1, 2);
+ /*
+  * This looks horribly ugly, but the compiler can optimize it totally,
+diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
+index 90dbbd9..607d3ba 100644
+--- a/arch/x86/include/asm/string_64.h
++++ b/arch/x86/include/asm/string_64.h
+@@ -27,8 +27,8 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
+    function. */
+ #define __HAVE_ARCH_MEMCPY 1
+-extern void *memcpy(void *to, const void *from, size_t len);
+-extern void *__memcpy(void *to, const void *from, size_t len);
++extern void *memcpy(void *to, const void *from, size_t len) __nocapture(2);
++extern void *__memcpy(void *to, const void *from, size_t len) __nocapture(2);
+ #ifndef CONFIG_KMEMCHECK
+ #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
+@@ -56,14 +56,14 @@ void *memset(void *s, int c, size_t n);
+ void *__memset(void *s, int c, size_t n);
+ #define __HAVE_ARCH_MEMMOVE
+-void *memmove(void *dest, const void *src, size_t count);
+-void *__memmove(void *dest, const void *src, size_t count);
++void *memmove(void *dest, const void *src, size_t count) __nocapture(2);
++void *__memmove(void *dest, const void *src, size_t count) __nocapture(2);
+-int memcmp(const void *cs, const void *ct, size_t count);
+-size_t strlen(const char *s);
+-char *strcpy(char *dest, const char *src);
+-char *strcat(char *dest, const char *src);
+-int strcmp(const char *cs, const char *ct);
++int memcmp(const void *cs, const void *ct, size_t count) __nocapture(1, 2);
++size_t strlen(const char *s) __nocapture(1);
++char *strcpy(char *dest, const char *src) __nocapture(2);
++char *strcat(char *dest, const char *src) __nocapture(2);
++int strcmp(const char *cs, const char *ct) __nocapture(1, 2);
+ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+@@ -89,7 +89,7 @@ int strcmp(const char *cs, const char *ct);
+  *
+  * Return 0 for success, -EFAULT for fail
+  */
+-int memcpy_mcsafe(void *dst, const void *src, size_t cnt);
++int memcpy_mcsafe(void *dst, const void *src, size_t cnt) __nocapture(2);
+ #endif /* __KERNEL__ */
+diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
+index 8f321a1..6207183 100644
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -110,7 +110,7 @@ do {                                                                       \
+            "call __switch_to\n\t"                                       \
+            "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
+            __switch_canary                                              \
+-           "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
++           "movq "__percpu_arg([thread_info])",%%r8\n\t"                \
+            "movq %%rax,%%rdi\n\t"                                       \
+            "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"                 \
+            "jnz   ret_from_fork\n\t"                                    \
+@@ -121,7 +121,7 @@ do {                                                                       \
+              [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+              [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
+              [_tif_fork] "i" (_TIF_FORK),                               \
+-             [thread_info] "i" (offsetof(struct task_struct, stack)),   \
++             [thread_info] "m" (current_tinfo),                         \
+              [current_task] "m" (current_task)                          \
+              __switch_canary_iparam                                     \
+            : "memory", "cc" __EXTRA_CLOBBER)
+diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
+index 82c34ee..940fa40 100644
+--- a/arch/x86/include/asm/sys_ia32.h
++++ b/arch/x86/include/asm/sys_ia32.h
+@@ -20,8 +20,8 @@
+ #include <asm/ia32.h>
+ /* ia32/sys_ia32.c */
+-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
+-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
++asmlinkage long sys32_truncate64(const char __user *, unsigned int, unsigned int);
++asmlinkage long sys32_ftruncate64(unsigned int, unsigned int, unsigned int);
+ asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
+ asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
+@@ -42,7 +42,7 @@ long sys32_vm86_warning(void);
+ asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
+ asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
+                                     unsigned, unsigned, int);
+-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
++asmlinkage long sys32_fadvise64(int, unsigned, unsigned, int, int);
+ asmlinkage long sys32_fallocate(int, int, unsigned,
+                               unsigned, unsigned, unsigned);
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 8b7c8d8e..a60b006 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -39,7 +39,7 @@
+ #  define TOP_OF_KERNEL_STACK_PADDING 8
+ # endif
+ #else
+-# define TOP_OF_KERNEL_STACK_PADDING 0
++# define TOP_OF_KERNEL_STACK_PADDING 16
+ #endif
+ /*
+@@ -53,20 +53,21 @@ struct task_struct;
+ #include <linux/atomic.h>
+ struct thread_info {
+-      struct task_struct      *task;          /* main task structure */
+       __u32                   flags;          /* low level flags */
+       __u32                   status;         /* thread synchronous flags */
+       __u32                   cpu;            /* current CPU */
++      mm_segment_t            addr_limit;
++      unsigned long           lowest_stack;
+ };
+-#define INIT_THREAD_INFO(tsk)                 \
++#define INIT_THREAD_INFO                      \
+ {                                             \
+-      .task           = &tsk,                 \
+       .flags          = 0,                    \
+       .cpu            = 0,                    \
++      .addr_limit     = KERNEL_DS,            \
+ }
+-#define init_thread_info      (init_thread_union.thread_info)
++#define init_thread_info      (init_thread_union.stack)
+ #define init_stack            (init_thread_union.stack)
+ #else /* !__ASSEMBLY__ */
+@@ -106,6 +107,7 @@ struct thread_info {
+ #define TIF_SYSCALL_TRACEPOINT        28      /* syscall tracepoint instrumentation */
+ #define TIF_ADDR32            29      /* 32-bit address space on 64 bits */
+ #define TIF_X32                       30      /* 32-bit native x86-64 binary */
++#define TIF_GRSEC_SETXID      31      /* update credentials on syscall entry/exit */
+ #define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
+@@ -129,6 +131,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_TRACEPOINT       (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_ADDR32           (1 << TIF_ADDR32)
+ #define _TIF_X32              (1 << TIF_X32)
++#define _TIF_GRSEC_SETXID     (1 << TIF_GRSEC_SETXID)
+ /*
+  * work to do in syscall_trace_enter().  Also includes TIF_NOHZ for
+@@ -137,12 +140,12 @@ struct thread_info {
+ #define _TIF_WORK_SYSCALL_ENTRY       \
+       (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |   \
+        _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT |       \
+-       _TIF_NOHZ)
++       _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ /* work to do on any return to user space */
+ #define _TIF_ALLWORK_MASK                                             \
+       ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |       \
+-      _TIF_NOHZ)
++      _TIF_NOHZ | _TIF_GRSEC_SETXID)
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW                                                       \
+@@ -160,9 +163,11 @@ struct thread_info {
+  */
+ #ifndef __ASSEMBLY__
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
+ static inline struct thread_info *current_thread_info(void)
+ {
+-      return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
++      return this_cpu_read_stable(current_tinfo);
+ }
+ static inline unsigned long current_stack_pointer(void)
+@@ -181,21 +186,21 @@ static inline unsigned long current_stack_pointer(void)
+  * entirely contained by a single stack frame.
+  *
+  * Returns:
+- *             1 if within a frame
+- *            -1 if placed across a frame boundary (or outside stack)
+- *             0 unable to determine (no frame pointers, etc)
++ *             GOOD_FRAME if within a frame
++ *             BAD_STACK if placed across a frame boundary (or outside stack)
++ *             GOOD_STACK unable to determine (no frame pointers, etc)
+  */
+-static inline int arch_within_stack_frames(const void * const stack,
+-                                         const void * const stackend,
+-                                         const void *obj, unsigned long len)
++static __always_inline int arch_within_stack_frames(unsigned long stack,
++                                         unsigned long stackend,
++                                         unsigned long obj, unsigned long len)
+ {
+ #if defined(CONFIG_FRAME_POINTER)
+-      const void *frame = NULL;
+-      const void *oldframe;
++      unsigned long frame = 0;
++      unsigned long oldframe;
+-      oldframe = __builtin_frame_address(1);
++      oldframe = (unsigned long)__builtin_frame_address(1);
+       if (oldframe)
+-              frame = __builtin_frame_address(2);
++              frame = (unsigned long)__builtin_frame_address(2);
+       /*
+        * low ----------------------------------------------> high
+        * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+@@ -210,48 +215,21 @@ static inline int arch_within_stack_frames(const void * const stack,
+                * the copy as invalid.
+                */
+               if (obj + len <= frame)
+-                      return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
++                      return obj >= oldframe + 2 * sizeof(unsigned long) ? GOOD_FRAME : BAD_STACK;
+               oldframe = frame;
+-              frame = *(const void * const *)frame;
++              frame = *(unsigned long *)frame;
+       }
+-      return -1;
++      return BAD_STACK;
+ #else
+-      return 0;
++      return GOOD_STACK;
+ #endif
+ }
+ #else /* !__ASSEMBLY__ */
+-#ifdef CONFIG_X86_64
+-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+-#endif
+-
+-/*
+- * ASM operand which evaluates to a 'thread_info' address of
+- * the current task, if it is known that "reg" is exactly "off"
+- * bytes below the top of the stack currently.
+- *
+- * ( The kernel stack's size is known at build time, it is usually
+- *   2 or 4 pages, and the bottom  of the kernel stack contains
+- *   the thread_info structure. So to access the thread_info very
+- *   quickly from assembly code we can calculate down from the
+- *   top of the kernel stack to the bottom, using constant,
+- *   build-time calculations only. )
+- *
+- * For example, to fetch the current thread_info->flags value into %eax
+- * on x86-64 defconfig kernels, in syscall entry code where RSP is
+- * currently at exactly SIZEOF_PTREGS bytes away from the top of the
+- * stack:
+- *
+- *      mov ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS), %eax
+- *
+- * will translate to:
+- *
+- *      8b 84 24 b8 c0 ff ff      mov    -0x3f48(%rsp), %eax
+- *
+- * which is below the current RSP by almost 16K.
+- */
+-#define ASM_THREAD_INFO(field, reg, off) ((field)+(off)-THREAD_SIZE)(reg)
++/* Load thread_info address into "reg" */
++#define GET_THREAD_INFO(reg) \
++      _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
+ #endif
+@@ -293,6 +271,13 @@ static inline bool in_ia32_syscall(void)
+ extern void arch_task_cache_init(void);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ extern void arch_release_task_struct(struct task_struct *tsk);
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task)        (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
+ #endif        /* !__ASSEMBLY__ */
+ #endif /* _ASM_X86_THREAD_INFO_H */
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index dee8a70..270877a 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -89,7 +89,9 @@ static inline void cr4_set_bits(unsigned long mask)
+ {
+       unsigned long cr4;
++//    BUG_ON(!arch_irqs_disabled());
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
++      BUG_ON(cr4 != __read_cr4());
+       if ((cr4 | mask) != cr4) {
+               cr4 |= mask;
+               this_cpu_write(cpu_tlbstate.cr4, cr4);
+@@ -102,7 +104,9 @@ static inline void cr4_clear_bits(unsigned long mask)
+ {
+       unsigned long cr4;
++//    BUG_ON(!arch_irqs_disabled());
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
++      BUG_ON(cr4 != __read_cr4());
+       if ((cr4 & ~mask) != cr4) {
+               cr4 &= ~mask;
+               this_cpu_write(cpu_tlbstate.cr4, cr4);
+@@ -113,6 +117,7 @@ static inline void cr4_clear_bits(unsigned long mask)
+ /* Read the CR4 shadow. */
+ static inline unsigned long cr4_read_shadow(void)
+ {
++//    BUG_ON(!arch_irqs_disabled());
+       return this_cpu_read(cpu_tlbstate.cr4);
+ }
+@@ -135,6 +140,25 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
+ static inline void __native_flush_tlb(void)
+ {
++      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++              u64 descriptor[2];
++
++              descriptor[0] = PCID_KERNEL;
++              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
++              return;
++      }
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++              unsigned int cpu = raw_get_cpu();
++
++              native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++              native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
++              raw_put_cpu_no_resched();
++              return;
++      }
++#endif
++
+       /*
+        * If current->mm == NULL then we borrow a mm which may change during a
+        * task switch and therefore we must not be preempted while we write CR3
+@@ -147,13 +171,21 @@ static inline void __native_flush_tlb(void)
+ static inline void __native_flush_tlb_global_irq_disabled(void)
+ {
+-      unsigned long cr4;
++      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++              u64 descriptor[2];
+-      cr4 = this_cpu_read(cpu_tlbstate.cr4);
+-      /* clear PGE */
+-      native_write_cr4(cr4 & ~X86_CR4_PGE);
+-      /* write old PGE again and flush TLBs */
+-      native_write_cr4(cr4);
++              descriptor[0] = PCID_KERNEL;
++              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
++      } else {
++              unsigned long cr4;
++
++              cr4 = this_cpu_read(cpu_tlbstate.cr4);
++              BUG_ON(cr4 != __read_cr4());
++              /* clear PGE */
++              native_write_cr4(cr4 & ~X86_CR4_PGE);
++              /* write old PGE again and flush TLBs */
++              native_write_cr4(cr4);
++      }
+ }
+ static inline void __native_flush_tlb_global(void)
+@@ -183,6 +215,43 @@ static inline void __native_flush_tlb_global(void)
+ static inline void __native_flush_tlb_single(unsigned long addr)
+ {
++      if (static_cpu_has(X86_FEATURE_INVPCID)) {
++              u64 descriptor[2];
++
++              descriptor[0] = PCID_KERNEL;
++              descriptor[1] = addr;
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++                      if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
++                              if (addr < TASK_SIZE_MAX)
++                                      descriptor[1] += pax_user_shadow_base;
++                              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
++                      }
++
++                      descriptor[0] = PCID_USER;
++                      descriptor[1] = addr;
++              }
++#endif
++
++              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
++              return;
++      }
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++              unsigned int cpu = raw_get_cpu();
++
++              native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
++              asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++              native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++              raw_put_cpu_no_resched();
++
++              if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
++                      addr += pax_user_shadow_base;
++      }
++#endif
++
+       asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+ }
+diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
+index 9217ab1..90c91bf 100644
+--- a/arch/x86/include/asm/trace/fpu.h
++++ b/arch/x86/include/asm/trace/fpu.h
+@@ -25,8 +25,8 @@ DECLARE_EVENT_CLASS(x86_fpu,
+               __entry->fpstate_active = fpu->fpstate_active;
+               __entry->counter        = fpu->counter;
+               if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+-                      __entry->xfeatures = fpu->state.xsave.header.xfeatures;
+-                      __entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
++                      __entry->xfeatures = fpu->state->xsave.header.xfeatures;
++                      __entry->xcomp_bv  = fpu->state->xsave.header.xcomp_bv;
+               }
+       ),
+       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index c3496619..9b914af 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -10,7 +10,7 @@
+ #define dotraplinkage __visible
+ asmlinkage void divide_error(void);
+-asmlinkage void debug(void);
++asmlinkage void int1(void);
+ asmlinkage void nmi(void);
+ asmlinkage void int3(void);
+ asmlinkage void xen_debug(void);
+@@ -38,6 +38,15 @@ asmlinkage void machine_check(void);
+ #endif /* CONFIG_X86_MCE */
+ asmlinkage void simd_coprocessor_error(void);
++#ifdef CONFIG_PAX_REFCOUNT
++asmlinkage void refcount_error(void);
++#endif
++
++#ifdef CONFIG_PAX_RAP
++asmlinkage void rap_call_error(void);
++asmlinkage void rap_ret_error(void);
++#endif
++
+ #ifdef CONFIG_TRACING
+ asmlinkage void trace_page_fault(void);
+ #define trace_stack_segment stack_segment
+@@ -54,6 +63,7 @@ asmlinkage void trace_page_fault(void);
+ #define trace_alignment_check alignment_check
+ #define trace_simd_coprocessor_error simd_coprocessor_error
+ #define trace_async_page_fault async_page_fault
++#define trace_refcount_error refcount_error
+ #endif
+ dotraplinkage void do_divide_error(struct pt_regs *, long);
+@@ -107,7 +117,7 @@ extern int panic_on_unrecovered_nmi;
+ void math_emulate(struct math_emu_info *);
+ #ifndef CONFIG_X86_32
+-asmlinkage void smp_thermal_interrupt(void);
++asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
+ asmlinkage void smp_threshold_interrupt(void);
+ asmlinkage void smp_deferred_error_interrupt(void);
+ #endif
+@@ -139,6 +149,9 @@ enum {
+       X86_TRAP_AC,            /* 17, Alignment Check */
+       X86_TRAP_MC,            /* 18, Machine Check */
+       X86_TRAP_XF,            /* 19, SIMD Floating-Point Exception */
++      X86_TRAP_VE,            /* 20, Virtualization Exception */
++      X86_TRAP_CP,            /* 21, Control Protection Exception */
++      X86_TRAP_SX = 30,       /* 30, Security Exception */
+       X86_TRAP_IRET = 32,     /* 32, IRET Exception */
+ };
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 2131c4c..120dcaa 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -8,6 +8,7 @@
+ #include <linux/kasan-checks.h>
+ #include <linux/thread_info.h>
+ #include <linux/string.h>
++#include <linux/spinlock.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+ #include <asm/smap.h>
+@@ -29,12 +30,17 @@
+ #define USER_DS       MAKE_MM_SEG(TASK_SIZE_MAX)
+ #define get_ds()      (KERNEL_DS)
+-#define get_fs()      (current->thread.addr_limit)
+-#define set_fs(x)     (current->thread.addr_limit = (x))
++#define get_fs()      (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
++#define set_fs(x)     (current_thread_info()->addr_limit = (x))
++#endif
+ #define segment_eq(a, b)      ((a).seg == (b).seg)
+-#define user_addr_max() (current->thread.addr_limit.seg)
++#define user_addr_max() (current_thread_info()->addr_limit.seg)
+ #define __addr_ok(addr)       \
+       ((unsigned long __force)(addr) < user_addr_max())
+@@ -87,8 +93,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+  * checks that the pointer is in the user space range - after calling
+  * this function, memory access functions may still return -EFAULT.
+  */
+-#define access_ok(type, addr, size) \
+-      likely(!__range_not_ok(addr, size, user_addr_max()))
++extern int _cond_resched(void);
++#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
++#define access_ok(type, addr, size)                                   \
++({                                                                    \
++      unsigned long __size = size;                                    \
++      unsigned long __addr = (unsigned long)addr;                     \
++      bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
++      if (__ret_ao && __size < 256 * PAGE_SIZE) {                     \
++              unsigned long __addr_ao = __addr & PAGE_MASK;           \
++              unsigned long __end_ao = __addr + __size - 1;           \
++              if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) {     \
++                      while (__addr_ao <= __end_ao) {                 \
++                              char __c_ao;                            \
++                              __addr_ao += PAGE_SIZE;                 \
++                              if (__size > PAGE_SIZE)                 \
++                                      _cond_resched();                \
++                              if (__get_user(__c_ao, (char __user *)__addr))  \
++                                      break;                          \
++                              if ((type) != VERIFY_WRITE) {           \
++                                      __addr = __addr_ao;             \
++                                      continue;                       \
++                              }                                       \
++                              if (__put_user(__c_ao, (char __user *)__addr))  \
++                                      break;                          \
++                              __addr = __addr_ao;                     \
++                      }                                               \
++              }                                                       \
++      }                                                               \
++      __ret_ao;                                                       \
++})
+ /*
+  * The exception table consists of triples of addresses relative to the
+@@ -142,15 +176,27 @@ extern int __get_user_4(void);
+ extern int __get_user_8(void);
+ extern int __get_user_bad(void);
+-#define __uaccess_begin() stac()
+-#define __uaccess_end()   clac()
++#define __uaccess_begin() pax_open_userland(); stac()
++#define __uaccess_end()   clac(); pax_close_userland()
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+ /*
+- * This is a type: either unsigned long, if the argument fits into
+- * that type, or otherwise unsigned long long.
++ * This is a type: either (un)signed int, if the argument fits into
++ * that type, or otherwise (un)signed long long.
+  */
+ #define __inttype(x) \
+-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U),              \
++      __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
++      __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
+ /**
+  * get_user: - Get a simple variable from user space.
+@@ -201,14 +247,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+       asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+                    : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+-
+-
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret)                      \
+       asm volatile("\n"                                               \
+-                   "1:        movl %%eax,0(%2)\n"                     \
+-                   "2:        movl %%edx,4(%2)\n"                     \
+-                   "3:"                                               \
++                   "1:        "__copyuser_seg"movl %%eax,0(%2)\n"     \
++                   "2:        "__copyuser_seg"movl %%edx,4(%2)\n"     \
++                   "3:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "4:        movl %3,%0\n"                           \
+                    "  jmp 3b\n"                                       \
+@@ -220,9 +264,9 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+ #define __put_user_asm_ex_u64(x, addr)                                        \
+       asm volatile("\n"                                               \
+-                   "1:        movl %%eax,0(%1)\n"                     \
+-                   "2:        movl %%edx,4(%1)\n"                     \
+-                   "3:"                                               \
++                   "1:        "__copyuser_seg"movl %%eax,0(%1)\n"     \
++                   "2:        "__copyuser_seg"movl %%edx,4(%1)\n"     \
++                   "3:\n"                                             \
+                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    _ASM_EXTABLE_EX(2b, 3b)                            \
+                    : : "A" (x), "r" (addr))
+@@ -269,10 +313,10 @@ extern void __put_user_8(void);
+ #define put_user(x, ptr)                                      \
+ ({                                                            \
+       int __ret_pu;                                           \
+-      __typeof__(*(ptr)) __pu_val;                            \
++      __inttype(*(ptr)) __pu_val;                             \
+       __chk_user_ptr(ptr);                                    \
+       might_fault();                                          \
+-      __pu_val = x;                                           \
++      __pu_val = (__inttype(*(ptr)))(x);                      \
+       switch (sizeof(*(ptr))) {                               \
+       case 1:                                                 \
+               __put_user_x(1, __pu_val, ptr, __ret_pu);       \
+@@ -345,10 +389,9 @@ do {                                                                      \
+ #define __get_user_asm_u64(x, ptr, retval, errret)                    \
+ ({                                                                    \
+       __typeof__(ptr) __ptr = (ptr);                                  \
+-      asm volatile(ASM_STAC "\n"                                      \
+-                   "1:        movl %2,%%eax\n"                        \
++      asm volatile("1:        movl %2,%%eax\n"                        \
+                    "2:        movl %3,%%edx\n"                        \
+-                   "3: " ASM_CLAC "\n"                                \
++                   "3:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "4:        mov %4,%0\n"                            \
+                    "  xorl %%eax,%%eax\n"                             \
+@@ -376,10 +419,10 @@ do {                                                                     \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+-              __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
++              __get_user_asm(x, ptr, retval, "zbl", "k", "=r", errret);\
+               break;                                                  \
+       case 2:                                                         \
+-              __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
++              __get_user_asm(x, ptr, retval, "zwl", "k", "=r", errret);\
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
+@@ -393,17 +436,19 @@ do {                                                                     \
+ } while (0)
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
++do {                                                                  \
+       asm volatile("\n"                                               \
+-                   "1:        mov"itype" %2,%"rtype"1\n"              \
++                   "1:        "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+-                   "  xor"itype" %"rtype"1,%"rtype"1\n"               \
++                   "  xorl %k1,%k1\n"                                 \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+-                   : "=r" (err), ltype(x)                             \
+-                   : "m" (__m(addr)), "i" (errret), "0" (err))
++                   : "=r" (err), ltype (x)                            \
++                   : "m" (__m(addr)), "i" (errret), "0" (err));       \
++} while (0)
+ /*
+  * This doesn't do __uaccess_begin/end - the exception handling
+@@ -414,10 +459,10 @@ do {                                                                     \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+-              __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
++              __get_user_asm_ex(x, ptr, "zbl", "k", "=r");            \
+               break;                                                  \
+       case 2:                                                         \
+-              __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
++              __get_user_asm_ex(x, ptr, "zwl", "k", "=r");            \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
+@@ -431,10 +476,10 @@ do {                                                                     \
+ } while (0)
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                       \
+-      asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
++      asm volatile("1:        "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+-                     "3:xor"itype" %"rtype"0,%"rtype"0\n"             \
++                   "3:xorl %k0,%k0\n"                                 \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE_EX(1b, 3b)                            \
+@@ -456,13 +501,24 @@ do {                                                                     \
+       __uaccess_begin();                                              \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+       __uaccess_end();                                                \
+-      (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
++      (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       __builtin_expect(__gu_err, 0);                                  \
+ })
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x)                                      \
++({                                                    \
++      unsigned long ____x = (unsigned long)(x);       \
++      if (____x < pax_user_shadow_base)               \
++              ____x += pax_user_shadow_base;          \
++      (typeof(x))____x;                               \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+ /*
+  * Tell gcc we read from memory instead of writing: this is because
+@@ -470,8 +526,9 @@ struct __large_struct { unsigned long buf[100]; };
+  * aliasing issues.
+  */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
++do {                                                                  \
+       asm volatile("\n"                                               \
+-                   "1:        mov"itype" %"rtype"1,%2\n"              \
++                   "1:        "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+@@ -479,10 +536,11 @@ struct __large_struct { unsigned long buf[100]; };
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : "=r"(err)                                        \
+-                   : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++                   : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
++} while (0)
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype)                       \
+-      asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
++      asm volatile("1:        "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+                    "2:\n"                                             \
+                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    : : ltype(x), "m" (__m(addr)))
+@@ -522,8 +580,12 @@ struct __large_struct { unsigned long buf[100]; };
+  * On error, the variable @x is set to zero.
+  */
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr)    get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr)                                            \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+ /**
+  * __put_user: - Write a simple value into user space, with less checking.
+@@ -546,8 +608,12 @@ struct __large_struct { unsigned long buf[100]; };
+  * Returns zero on success, or -EFAULT on error.
+  */
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr)    put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr)                                            \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -565,7 +631,7 @@ struct __large_struct { unsigned long buf[100]; };
+ #define get_user_ex(x, ptr)   do {                                    \
+       unsigned long __gue_val;                                        \
+       __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
+-      (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
++      (x) = (__typeof__(*(ptr)))__gue_val;                            \
+ } while (0)
+ #define put_user_try          uaccess_try
+@@ -583,7 +649,7 @@ extern __must_check long strlen_user(const char __user *str);
+ extern __must_check long strnlen_user(const char __user *str, long n);
+ unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
++unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
+ extern void __cmpxchg_wrong_size(void)
+       __compiletime_error("Bad argument size for cmpxchg");
+@@ -591,22 +657,22 @@ extern void __cmpxchg_wrong_size(void)
+ #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)     \
+ ({                                                                    \
+       int __ret = 0;                                                  \
+-      __typeof__(ptr) __uval = (uval);                                \
+-      __typeof__(*(ptr)) __old = (old);                               \
+-      __typeof__(*(ptr)) __new = (new);                               \
++      __typeof__(uval) __uval = (uval);                               \
++      __typeof__(*(uval)) __old = (old);                              \
++      __typeof__(*(uval)) __new = (new);                              \
+       __uaccess_begin();                                              \
+       switch (size) {                                                 \
+       case 1:                                                         \
+       {                                                               \
+               asm volatile("\n"                                       \
+-                      "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
++                      "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
+                       "2:\n"                                          \
+                       "\t.section .fixup, \"ax\"\n"                   \
+                       "3:\tmov     %3, %0\n"                          \
+                       "\tjmp     2b\n"                                \
+                       "\t.previous\n"                                 \
+                       _ASM_EXTABLE(1b, 3b)                            \
+-                      : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
++                      : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
+                       : "i" (-EFAULT), "q" (__new), "1" (__old)       \
+                       : "memory"                                      \
+               );                                                      \
+@@ -615,14 +681,14 @@ extern void __cmpxchg_wrong_size(void)
+       case 2:                                                         \
+       {                                                               \
+               asm volatile("\n"                                       \
+-                      "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
++                      "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
+                       "2:\n"                                          \
+                       "\t.section .fixup, \"ax\"\n"                   \
+                       "3:\tmov     %3, %0\n"                          \
+                       "\tjmp     2b\n"                                \
+                       "\t.previous\n"                                 \
+                       _ASM_EXTABLE(1b, 3b)                            \
+-                      : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
++                      : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
+                       : "i" (-EFAULT), "r" (__new), "1" (__old)       \
+                       : "memory"                                      \
+               );                                                      \
+@@ -631,14 +697,14 @@ extern void __cmpxchg_wrong_size(void)
+       case 4:                                                         \
+       {                                                               \
+               asm volatile("\n"                                       \
+-                      "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
++                      "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
+                       "2:\n"                                          \
+                       "\t.section .fixup, \"ax\"\n"                   \
+                       "3:\tmov     %3, %0\n"                          \
+                       "\tjmp     2b\n"                                \
+                       "\t.previous\n"                                 \
+                       _ASM_EXTABLE(1b, 3b)                            \
+-                      : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
++                      : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
+                       : "i" (-EFAULT), "r" (__new), "1" (__old)       \
+                       : "memory"                                      \
+               );                                                      \
+@@ -650,14 +716,14 @@ extern void __cmpxchg_wrong_size(void)
+                       __cmpxchg_wrong_size();                         \
+                                                                       \
+               asm volatile("\n"                                       \
+-                      "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
++                      "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
+                       "2:\n"                                          \
+                       "\t.section .fixup, \"ax\"\n"                   \
+                       "3:\tmov     %3, %0\n"                          \
+                       "\tjmp     2b\n"                                \
+                       "\t.previous\n"                                 \
+                       _ASM_EXTABLE(1b, 3b)                            \
+-                      : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
++                      : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
+                       : "i" (-EFAULT), "r" (__new), "1" (__old)       \
+                       : "memory"                                      \
+               );                                                      \
+@@ -690,17 +756,6 @@ extern struct movsl_mask {
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+-#ifdef CONFIG_X86_32
+-# include <asm/uaccess_32.h>
+-#else
+-# include <asm/uaccess_64.h>
+-#endif
+-
+-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
+-                                         unsigned n);
+-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+-                                       unsigned n);
+-
+ extern void __compiletime_error("usercopy buffer size is too small")
+ __bad_copy_user(void);
+@@ -709,22 +764,30 @@ static inline void copy_user_overflow(int size, unsigned long count)
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+ }
++#ifdef CONFIG_X86_32
++# include <asm/uaccess_32.h>
++#else
++# include <asm/uaccess_64.h>
++#endif
++
+ static __always_inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-      int sz = __compiletime_object_size(to);
++      size_t sz = __compiletime_object_size(to);
+       might_fault();
+       kasan_check_write(to, n);
+-      if (likely(sz < 0 || sz >= n)) {
+-              check_object_size(to, n, false);
+-              n = _copy_from_user(to, from, n);
+-      } else if (!__builtin_constant_p(n))
+-              copy_user_overflow(sz, n);
+-      else
+-              __bad_copy_user();
++      if (unlikely(sz != (size_t)-1  && sz < n)) {
++              if (!__builtin_constant_p(n))
++                      copy_user_overflow(sz, n);
++              else
++                      __bad_copy_user();
++      } else if (access_ok(VERIFY_READ, from, n))
++              n = __copy_from_user(to, from, n);
++      else if ((long)n > 0)
++              memset(to, 0, n);
+       return n;
+ }
+@@ -732,19 +795,19 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ static __always_inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+-      int sz = __compiletime_object_size(from);
++      size_t sz = __compiletime_object_size(from);
+       kasan_check_read(from, n);
+       might_fault();
+-      if (likely(sz < 0 || sz >= n)) {
+-              check_object_size(from, n, true);
+-              n = _copy_to_user(to, from, n);
+-      } else if (!__builtin_constant_p(n))
+-              copy_user_overflow(sz, n);
+-      else
+-              __bad_copy_user();
++      if (unlikely(sz != (size_t)-1  && sz < n)) {
++              if (!__builtin_constant_p(n))
++                      copy_user_overflow(sz, n);
++              else
++                      __bad_copy_user();
++      } else if (access_ok(VERIFY_WRITE, to, n))
++              n = __copy_to_user(to, from, n);
+       return n;
+ }
+diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
+index 7d3bdd1..67d81f6 100644
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -34,9 +34,12 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+  * The caller should also make sure he pins the user space address
+  * so that we don't result in page fault and sleep.
+  */
+-static __always_inline unsigned long __must_check
++static __always_inline __size_overflow(3) unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
+       check_object_size(from, n, true);
+       return __copy_to_user_ll(to, from, n);
+ }
+@@ -60,12 +63,17 @@ static __always_inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+       might_fault();
++
+       return __copy_to_user_inatomic(to, from, n);
+ }
+-static __always_inline unsigned long
++static __always_inline __size_overflow(3) unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++      if ((long)n < 0)
++              return n;
++
++      check_object_size(to, n, false);
+       return __copy_from_user_ll_nozero(to, from, n);
+ }
+@@ -96,6 +104,10 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+       might_fault();
++
++      if ((long)n < 0)
++              return n;
++
+       check_object_size(to, n, false);
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+@@ -125,6 +137,11 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+                               const void __user *from, unsigned long n)
+ {
+       might_fault();
++
++      if ((long)n < 0)
++              return n;
++
++      check_object_size(to, n, false);
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+@@ -153,7 +170,11 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+                                 unsigned long n)
+ {
+-       return __copy_from_user_ll_nocache_nozero(to, from, n);
++      if ((long)n < 0)
++              return n;
++
++      check_object_size(to, n, false);
++      return __copy_from_user_ll_nocache_nozero(to, from, n);
+ }
+ #endif /* _ASM_X86_UACCESS_32_H */
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index 673059a..286a5bf 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -11,6 +11,7 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeatures.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
+ /*
+  * Copy To/From Userspace
+@@ -24,8 +25,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
+ __must_check unsigned long
+ copy_user_generic_unrolled(void *to, const void *from, unsigned len);
+-static __always_inline __must_check unsigned long
+-copy_user_generic(void *to, const void *from, unsigned len)
++static __always_inline __must_check __size_overflow(3) unsigned long
++copy_user_generic(void *to, const void *from, unsigned long len)
+ {
+       unsigned ret;
+@@ -47,68 +48,86 @@ copy_user_generic(void *to, const void *from, unsigned len)
+ }
+ __must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len);
+ static __always_inline __must_check
+-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
+ {
+-      int ret = 0;
++      size_t sz = __compiletime_object_size(dst);
++      unsigned ret = 0;
++
++      if (size > INT_MAX)
++              return size;
+       check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!access_ok_noprefault(VERIFY_READ, src, size))
++              return size;
++#endif
++
++      if (unlikely(sz != (size_t)-1 && sz < size)) {
++               if(__builtin_constant_p(size))
++                      __bad_copy_user();
++              else
++                      copy_user_overflow(sz, size);
++              return size;
++      }
++
+       if (!__builtin_constant_p(size))
+-              return copy_user_generic(dst, (__force void *)src, size);
++              return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+       switch (size) {
+       case 1:
+               __uaccess_begin();
+-              __get_user_asm(*(u8 *)dst, (u8 __user *)src,
++              __get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+                             ret, "b", "b", "=q", 1);
+               __uaccess_end();
+               return ret;
+       case 2:
+               __uaccess_begin();
+-              __get_user_asm(*(u16 *)dst, (u16 __user *)src,
++              __get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+                             ret, "w", "w", "=r", 2);
+               __uaccess_end();
+               return ret;
+       case 4:
+               __uaccess_begin();
+-              __get_user_asm(*(u32 *)dst, (u32 __user *)src,
++              __get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+                             ret, "l", "k", "=r", 4);
+               __uaccess_end();
+               return ret;
+       case 8:
+               __uaccess_begin();
+-              __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++              __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+                             ret, "q", "", "=r", 8);
+               __uaccess_end();
+               return ret;
+       case 10:
+               __uaccess_begin();
+-              __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++              __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+                              ret, "q", "", "=r", 10);
+               if (likely(!ret))
+                       __get_user_asm(*(u16 *)(8 + (char *)dst),
+-                                     (u16 __user *)(8 + (char __user *)src),
++                                     (const u16 __user *)(8 + (const char __user *)src),
+                                      ret, "w", "w", "=r", 2);
+               __uaccess_end();
+               return ret;
+       case 16:
+               __uaccess_begin();
+-              __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++              __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+                              ret, "q", "", "=r", 16);
+               if (likely(!ret))
+                       __get_user_asm(*(u64 *)(8 + (char *)dst),
+-                                     (u64 __user *)(8 + (char __user *)src),
++                                     (const u64 __user *)(8 + (const char __user *)src),
+                                      ret, "q", "", "=r", 8);
+               __uaccess_end();
+               return ret;
+       default:
+-              return copy_user_generic(dst, (__force void *)src, size);
++              return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+       }
+ }
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+       might_fault();
+       kasan_check_write(dst, size);
+@@ -116,67 +135,85 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
+ }
+ static __always_inline __must_check
+-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
+ {
+-      int ret = 0;
++      size_t sz = __compiletime_object_size(src);
++      unsigned ret = 0;
++
++      if (size > INT_MAX)
++              return size;
+       check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
++              return size;
++#endif
++
++      if (unlikely(sz != (size_t)-1 && sz < size)) {
++               if(__builtin_constant_p(size))
++                      __bad_copy_user();
++              else
++                      copy_user_overflow(sz, size);
++              return size;
++      }
++
+       if (!__builtin_constant_p(size))
+-              return copy_user_generic((__force void *)dst, src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+       switch (size) {
+       case 1:
+               __uaccess_begin();
+-              __put_user_asm(*(u8 *)src, (u8 __user *)dst,
++              __put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+                             ret, "b", "b", "iq", 1);
+               __uaccess_end();
+               return ret;
+       case 2:
+               __uaccess_begin();
+-              __put_user_asm(*(u16 *)src, (u16 __user *)dst,
++              __put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+                             ret, "w", "w", "ir", 2);
+               __uaccess_end();
+               return ret;
+       case 4:
+               __uaccess_begin();
+-              __put_user_asm(*(u32 *)src, (u32 __user *)dst,
++              __put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+                             ret, "l", "k", "ir", 4);
+               __uaccess_end();
+               return ret;
+       case 8:
+               __uaccess_begin();
+-              __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++              __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+                             ret, "q", "", "er", 8);
+               __uaccess_end();
+               return ret;
+       case 10:
+               __uaccess_begin();
+-              __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++              __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+                              ret, "q", "", "er", 10);
+               if (likely(!ret)) {
+                       asm("":::"memory");
+-                      __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++                      __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+                                      ret, "w", "w", "ir", 2);
+               }
+               __uaccess_end();
+               return ret;
+       case 16:
+               __uaccess_begin();
+-              __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++              __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+                              ret, "q", "", "er", 16);
+               if (likely(!ret)) {
+                       asm("":::"memory");
+-                      __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++                      __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+                                      ret, "q", "", "er", 8);
+               }
+               __uaccess_end();
+               return ret;
+       default:
+-              return copy_user_generic((__force void *)dst, src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+       }
+ }
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+       might_fault();
+       kasan_check_read(src, size);
+@@ -184,19 +221,30 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
+ }
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ {
+-      int ret = 0;
++      unsigned ret = 0;
+       might_fault();
++
++      if (size > INT_MAX)
++              return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!access_ok_noprefault(VERIFY_READ, src, size))
++              return size;
++      if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
++              return size;
++#endif
++
+       if (!__builtin_constant_p(size))
+-              return copy_user_generic((__force void *)dst,
+-                                       (__force void *)src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst),
++                                       (__force_kernel const void *)____m(src), size);
+       switch (size) {
+       case 1: {
+               u8 tmp;
+               __uaccess_begin();
+-              __get_user_asm(tmp, (u8 __user *)src,
++              __get_user_asm(tmp, (const u8 __user *)src,
+                              ret, "b", "b", "=q", 1);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u8 __user *)dst,
+@@ -207,7 +255,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+       case 2: {
+               u16 tmp;
+               __uaccess_begin();
+-              __get_user_asm(tmp, (u16 __user *)src,
++              __get_user_asm(tmp, (const u16 __user *)src,
+                              ret, "w", "w", "=r", 2);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u16 __user *)dst,
+@@ -219,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+       case 4: {
+               u32 tmp;
+               __uaccess_begin();
+-              __get_user_asm(tmp, (u32 __user *)src,
++              __get_user_asm(tmp, (const u32 __user *)src,
+                              ret, "l", "k", "=r", 4);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u32 __user *)dst,
+@@ -230,7 +278,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+       case 8: {
+               u64 tmp;
+               __uaccess_begin();
+-              __get_user_asm(tmp, (u64 __user *)src,
++              __get_user_asm(tmp, (const u64 __user *)src,
+                              ret, "q", "", "=r", 8);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u64 __user *)dst,
+@@ -239,45 +287,67 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+               return ret;
+       }
+       default:
+-              return copy_user_generic((__force void *)dst,
+-                                       (__force void *)src, size);
++              return copy_user_generic((__force_kernel void *)____m(dst),
++                                       (__force_kernel const void *)____m(src), size);
+       }
+ }
+-static __must_check __always_inline int
+-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
+ {
+       kasan_check_write(dst, size);
+       return __copy_from_user_nocheck(dst, src, size);
+ }
+-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
+ {
+       kasan_check_read(src, size);
+       return __copy_to_user_nocheck(dst, src, size);
+ }
+-extern long __copy_user_nocache(void *dst, const void __user *src,
+-                              unsigned size, int zerorest);
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
++                              unsigned long size, int zerorest) __size_overflow(3);
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long
++__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
+ {
+       might_fault();
+       kasan_check_write(dst, size);
++
++      if (size > INT_MAX)
++              return size;
++
++      check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!access_ok_noprefault(VERIFY_READ, src, size))
++              return size;
++#endif
++
+       return __copy_user_nocache(dst, src, size, 1);
+ }
+-static inline int
++static inline unsigned long
+ __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+-                                unsigned size)
++                                unsigned long size)
+ {
+       kasan_check_write(dst, size);
++
++      if (size > INT_MAX)
++              return size;
++
++      check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!access_ok_noprefault(VERIFY_READ, src, size))
++              return size;
++#endif
++
+       return __copy_user_nocache(dst, src, size, 0);
+ }
+ unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len);
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
+index 5b238981..77fdd78 100644
+--- a/arch/x86/include/asm/word-at-a-time.h
++++ b/arch/x86/include/asm/word-at-a-time.h
+@@ -11,7 +11,7 @@
+  * and shift, for example.
+  */
+ struct word_at_a_time {
+-      const unsigned long one_bits, high_bits;
++      unsigned long one_bits, high_bits;
+ };
+ #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 6ba7931..dc843cd 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -126,7 +126,7 @@ struct x86_init_ops {
+       struct x86_init_timers          timers;
+       struct x86_init_iommu           iommu;
+       struct x86_init_pci             pci;
+-};
++} __no_const;
+ /**
+  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+@@ -137,7 +137,7 @@ struct x86_cpuinit_ops {
+       void (*setup_percpu_clockev)(void);
+       void (*early_percpu_clock_init)(void);
+       void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
+-};
++} __no_const;
+ struct timespec;
+@@ -225,12 +225,12 @@ struct x86_msi_ops {
+       void (*teardown_msi_irq)(unsigned int irq);
+       void (*teardown_msi_irqs)(struct pci_dev *dev);
+       void (*restore_msi_irqs)(struct pci_dev *dev);
+-};
++} __no_const;
+ struct x86_io_apic_ops {
+       unsigned int    (*read)   (unsigned int apic, unsigned int reg);
+       void            (*disable)(void);
+-};
++} __no_const;
+ extern struct x86_init_ops x86_init;
+ extern struct x86_cpuinit_ops x86_cpuinit;
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index f5fb840..e45184e 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+  * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
+  *   cases needing an extended handling.
+  */
+-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
++static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
+ {
+       unsigned long mfn;
+diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
+index 9dafe59..0293c1d 100644
+--- a/arch/x86/include/uapi/asm/e820.h
++++ b/arch/x86/include/uapi/asm/e820.h
+@@ -69,7 +69,7 @@ struct e820map {
+ #define ISA_START_ADDRESS     0xa0000
+ #define ISA_END_ADDRESS               0x100000
+-#define BIOS_BEGIN            0x000a0000
++#define BIOS_BEGIN            0x000c0000
+ #define BIOS_END              0x00100000
+ #define BIOS_ROM_BASE         0xffe00000
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index 0503f5b..f00b6e8 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -46,7 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL)     += ldt.o
+ obj-y                 += setup.o x86_init.o i8259.o irqinit.o jump_label.o
+ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
+ obj-y                 += probe_roms.o
+-obj-$(CONFIG_X86_32)  += i386_ksyms_32.o
++obj-$(CONFIG_X86_32)  += sys_i386_32.o i386_ksyms_32.o
+ obj-$(CONFIG_X86_64)  += sys_x86_64.o x8664_ksyms_64.o
+ obj-$(CONFIG_X86_64)  += mcount_64.o
+ obj-$(CONFIG_X86_ESPFIX64)    += espfix_64.o
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index fbd1944..7d27c3c 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1357,7 +1357,7 @@ static void __init acpi_reduced_hw_init(void)
+  * If your system is blacklisted here, but you find that acpi=force
+  * works for you, please contact linux-acpi@vger.kernel.org
+  */
+-static struct dmi_system_id __initdata acpi_dmi_table[] = {
++static const struct dmi_system_id __initconst acpi_dmi_table[] = {
+       /*
+        * Boxes that need ACPI disabled
+        */
+@@ -1432,7 +1432,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+ };
+ /* second table for DMI checks that should run after early-quirks */
+-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
++static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
+       /*
+        * HP laptops which use a DSDT reporting as HP/SB400/10000,
+        * which includes some code which overrides all temperature
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index adb3eaf..0eb666c 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -100,8 +100,12 @@ int x86_acpi_suspend_lowlevel(void)
+ #else /* CONFIG_64BIT */
+ #ifdef CONFIG_SMP
+       stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++      pax_open_kernel();
+       early_gdt_descr.address =
+                       (unsigned long)get_cpu_gdt_table(smp_processor_id());
++      pax_close_kernel();
++
+       initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+       initial_code = (unsigned long)wakeup_long64;
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index 0c26b1b..9120e26 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -2,6 +2,7 @@
+ #include <linux/linkage.h>
+ #include <asm/segment.h>
+ #include <asm/page_types.h>
++#include <asm/smap.h>
+ # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
+@@ -31,13 +32,11 @@ wakeup_pmode_return:
+       # and restore the stack ... but you need gdt for this to work
+       movl    saved_context_esp, %esp
+-      movl    %cs:saved_magic, %eax
+-      cmpl    $0x12345678, %eax
++      cmpl    $0x12345678, saved_magic
+       jne     bogus_magic
+       # jump to place where we left off
+-      movl    saved_eip, %eax
+-      jmp     *%eax
++      jmp     *(saved_eip)
+ bogus_magic:
+       jmp     bogus_magic
+@@ -69,6 +68,7 @@ restore_registers:
+       movl    saved_context_edi, %edi
+       pushl   saved_context_eflags
+       popfl
++      ASM_CLAC
+       ret
+ ENTRY(do_suspend_lowlevel)
+diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
+index 169963f..d5caf11 100644
+--- a/arch/x86/kernel/acpi/wakeup_64.S
++++ b/arch/x86/kernel/acpi/wakeup_64.S
+@@ -6,6 +6,7 @@
+ #include <asm/msr.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/frame.h>
++#include <asm/smap.h>
+ # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
+@@ -93,6 +94,7 @@ ENTRY(do_suspend_lowlevel)
+       movq    %rbx, %cr0
+       pushq   pt_regs_flags(%rax)
+       popfq
++      ASM_CLAC
+       movq    pt_regs_sp(%rax), %rsp
+       movq    pt_regs_bp(%rax), %rbp
+       movq    pt_regs_si(%rax), %rsi
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 5cb272a..cddd2e9 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -21,6 +21,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/io.h>
+ #include <asm/fixmap.h>
++#include <asm/boot.h>
+ int __read_mostly alternatives_patched;
+@@ -262,7 +263,9 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
+               unsigned int noplen = len;
+               if (noplen > ASM_NOP_MAX)
+                       noplen = ASM_NOP_MAX;
++              pax_open_kernel();
+               memcpy(insns, ideal_nops[noplen], noplen);
++              pax_close_kernel();
+               insns += noplen;
+               len -= noplen;
+       }
+@@ -290,6 +293,13 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
+       if (a->replacementlen != 5)
+               return;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (orig_insn < (u8 *)_text || (u8 *)_einittext <= orig_insn)
++              orig_insn = (u8 *)ktva_ktla((unsigned long)orig_insn);
++      else
++              orig_insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+       o_dspl = *(s32 *)(insnbuf + 1);
+       /* next_rip of the replacement JMP */
+@@ -365,6 +375,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ {
+       struct alt_instr *a;
+       u8 *instr, *replacement;
++      u8 *vinstr, *vreplacement;
+       u8 insnbuf[MAX_PATCH_LEN];
+       DPRINTK("alt table %p -> %p", start, end);
+@@ -380,46 +391,71 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+       for (a = start; a < end; a++) {
+               int insnbuf_sz = 0;
+-              instr = (u8 *)&a->instr_offset + a->instr_offset;
+-              replacement = (u8 *)&a->repl_offset + a->repl_offset;
++              vinstr = instr = (u8 *)&a->instr_offset + a->instr_offset;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= instr &&
++                  instr < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) {
++                      instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++                      vinstr = (u8 *)ktla_ktva((unsigned long)instr);
++              } else if ((u8 *)_text <= instr && instr < (u8 *)_einittext) {
++                      vinstr = (u8 *)ktla_ktva((unsigned long)instr);
++              } else {
++                      instr = (u8 *)ktva_ktla((unsigned long)instr);
++              }
++#endif
++
++              vreplacement = replacement = (u8 *)&a->repl_offset + a->repl_offset;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= replacement &&
++                  replacement < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) {
++                      replacement += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++                      vreplacement = (u8 *)ktla_ktva((unsigned long)replacement);
++              } else if ((u8 *)_text <= replacement && replacement < (u8 *)_einittext) {
++                      vreplacement = (u8 *)ktla_ktva((unsigned long)replacement);
++              } else
++                      replacement = (u8 *)ktva_ktla((unsigned long)replacement);
++#endif
++
+               BUG_ON(a->instrlen > sizeof(insnbuf));
+               BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
+               if (!boot_cpu_has(a->cpuid)) {
+                       if (a->padlen > 1)
+-                              optimize_nops(a, instr);
++                              optimize_nops(a, vinstr);
+                       continue;
+               }
+-              DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
++              DPRINTK("feat: %d*32+%d, old: (%p/%p, len: %d), repl: (%p, len: %d), pad: %d",
+                       a->cpuid >> 5,
+                       a->cpuid & 0x1f,
+-                      instr, a->instrlen,
+-                      replacement, a->replacementlen, a->padlen);
++                      instr, vinstr, a->instrlen,
++                      vreplacement, a->replacementlen, a->padlen);
+-              DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
+-              DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
++              DUMP_BYTES(vinstr, a->instrlen, "%p: old_insn: ", vinstr);
++              DUMP_BYTES(vreplacement, a->replacementlen, "%p: rpl_insn: ", vreplacement);
+-              memcpy(insnbuf, replacement, a->replacementlen);
++              memcpy(insnbuf, vreplacement, a->replacementlen);
+               insnbuf_sz = a->replacementlen;
+               /* 0xe8 is a relative jump; fix the offset. */
+               if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+-                      *(s32 *)(insnbuf + 1) += replacement - instr;
++                      *(s32 *)(insnbuf + 1) += vreplacement - instr;
+                       DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
+                               *(s32 *)(insnbuf + 1),
+-                              (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
++                              (unsigned long)vinstr + *(s32 *)(insnbuf + 1) + 5);
+               }
+-              if (a->replacementlen && is_jmp(replacement[0]))
+-                      recompute_jump(a, instr, replacement, insnbuf);
++              if (a->replacementlen && is_jmp(vreplacement[0]))
++                      recompute_jump(a, instr, vreplacement, insnbuf);
+               if (a->instrlen > a->replacementlen) {
+                       add_nops(insnbuf + a->replacementlen,
+                                a->instrlen - a->replacementlen);
+                       insnbuf_sz += a->instrlen - a->replacementlen;
+               }
+-              DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
++              DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", vinstr);
+               text_poke_early(instr, insnbuf, insnbuf_sz);
+       }
+@@ -435,10 +471,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+       for (poff = start; poff < end; poff++) {
+               u8 *ptr = (u8 *)poff + *poff;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++              if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++                      ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+               if (!*poff || ptr < text || ptr >= text_end)
+                       continue;
+               /* turn DS segment override prefix into lock prefix */
+-              if (*ptr == 0x3e)
++              if (*(u8 *)ktla_ktva((unsigned long)ptr) == 0x3e)
+                       text_poke(ptr, ((unsigned char []){0xf0}), 1);
+       }
+       mutex_unlock(&text_mutex);
+@@ -453,10 +495,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+       for (poff = start; poff < end; poff++) {
+               u8 *ptr = (u8 *)poff + *poff;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++              if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++                      ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+               if (!*poff || ptr < text || ptr >= text_end)
+                       continue;
+               /* turn lock prefix into DS segment override prefix */
+-              if (*ptr == 0xf0)
++              if (*(u8 *)ktla_ktva((unsigned long)ptr) == 0xf0)
+                       text_poke(ptr, ((unsigned char []){0x3E}), 1);
+       }
+       mutex_unlock(&text_mutex);
+@@ -593,7 +641,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+               BUG_ON(p->len > MAX_PATCH_LEN);
+               /* prep the buffer with the original instructions */
+-              memcpy(insnbuf, p->instr, p->len);
++              memcpy(insnbuf, (const void *)ktla_ktva((unsigned long)p->instr), p->len);
+               used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+                                        (unsigned long)p->instr, p->len);
+@@ -640,7 +688,7 @@ void __init alternative_instructions(void)
+       if (!uniproc_patched || num_possible_cpus() == 1)
+               free_init_pages("SMP alternatives",
+                               (unsigned long)__smp_locks,
+-                              (unsigned long)__smp_locks_end);
++                              PAGE_ALIGN((unsigned long)__smp_locks_end));
+ #endif
+       apply_paravirt(__parainstructions, __parainstructions_end);
+@@ -661,13 +709,17 @@ void __init alternative_instructions(void)
+  * instructions. And on the local CPU you need to be protected again NMI or MCE
+  * handlers seeing an inconsistent instruction while you patch.
+  */
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
+                                             size_t len)
+ {
+       unsigned long flags;
+       local_irq_save(flags);
+-      memcpy(addr, opcode, len);
++
++      pax_open_kernel();
++      memcpy((void *)ktla_ktva((unsigned long)addr), opcode, len);
+       sync_core();
++      pax_close_kernel();
++
+       local_irq_restore(flags);
+       /* Could also do a CLFLUSH here to speed up CPU recovery; but
+          that causes hangs on some VIA CPUs. */
+@@ -689,20 +741,29 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+  */
+ void *text_poke(void *addr, const void *opcode, size_t len)
+ {
+-      unsigned long flags;
+-      char *vaddr;
++      unsigned char *vaddr = (void *)ktla_ktva((unsigned long)addr);
+       struct page *pages[2];
+-      int i;
++      size_t i;
++
++#ifndef CONFIG_PAX_KERNEXEC
++      unsigned long flags;
++#endif
+       if (!core_kernel_text((unsigned long)addr)) {
+-              pages[0] = vmalloc_to_page(addr);
+-              pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++              pages[0] = vmalloc_to_page(vaddr);
++              pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+       } else {
+-              pages[0] = virt_to_page(addr);
++              pages[0] = virt_to_page(vaddr);
+               WARN_ON(!PageReserved(pages[0]));
+-              pages[1] = virt_to_page(addr + PAGE_SIZE);
++              pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+       }
+       BUG_ON(!pages[0]);
++
++#ifdef CONFIG_PAX_KERNEXEC
++      text_poke_early(addr, opcode, len);
++      for (i = 0; i < len; i++)
++              BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
++#else
+       local_irq_save(flags);
+       set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+       if (pages[1])
+@@ -719,6 +780,7 @@ void *text_poke(void *addr, const void *opcode, size_t len)
+       for (i = 0; i < len; i++)
+               BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+       local_irq_restore(flags);
++#endif
+       return addr;
+ }
+@@ -772,7 +834,7 @@ int poke_int3_handler(struct pt_regs *regs)
+  */
+ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
+ {
+-      unsigned char int3 = 0xcc;
++      const unsigned char int3 = 0xcc;
+       bp_int3_handler = handler;
+       bp_int3_addr = (u8 *)addr + sizeof(int3);
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 076c315..88957c6 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -181,7 +181,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
+ /*
+  * Debug level, exported for io_apic.c
+  */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+ int pic_mode;
+@@ -1905,7 +1905,7 @@ static void __smp_error_interrupt(struct pt_regs *regs)
+               apic_write(APIC_ESR, 0);
+       v = apic_read(APIC_ESR);
+       ack_APIC_irq();
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+       apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
+                   smp_processor_id(), v);
+diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
+index 5b2ae10..b3551c0 100644
+--- a/arch/x86/kernel/apic/apic_flat_64.c
++++ b/arch/x86/kernel/apic/apic_flat_64.c
+@@ -25,7 +25,7 @@
+ static struct apic apic_physflat;
+ static struct apic apic_flat;
+-struct apic __read_mostly *apic = &apic_flat;
++struct apic *apic __read_only = &apic_flat;
+ EXPORT_SYMBOL_GPL(apic);
+ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+@@ -154,7 +154,7 @@ static int flat_probe(void)
+       return 1;
+ }
+-static struct apic apic_flat =  {
++static struct apic apic_flat __read_only =  {
+       .name                           = "flat",
+       .probe                          = flat_probe,
+       .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
+@@ -248,7 +248,7 @@ static int physflat_probe(void)
+       return 0;
+ }
+-static struct apic apic_physflat =  {
++static struct apic apic_physflat __read_only =  {
+       .name                           = "physical flat",
+       .probe                          = physflat_probe,
+diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
+index c05688b..a250c5a 100644
+--- a/arch/x86/kernel/apic/apic_noop.c
++++ b/arch/x86/kernel/apic/apic_noop.c
+@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
+       WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
+ }
+-struct apic apic_noop = {
++struct apic apic_noop __read_only = {
+       .name                           = "noop",
+       .probe                          = noop_probe,
+       .acpi_madt_oem_check            = NULL,
+diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
+index 06dbaa4..817a7bb 100644
+--- a/arch/x86/kernel/apic/bigsmp_32.c
++++ b/arch/x86/kernel/apic/bigsmp_32.c
+@@ -142,7 +142,7 @@ static int probe_bigsmp(void)
+       return dmi_bigsmp;
+ }
+-static struct apic apic_bigsmp = {
++static struct apic apic_bigsmp __read_only = {
+       .name                           = "bigsmp",
+       .probe                          = probe_bigsmp,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 48e6d84..fdefc57 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1683,7 +1683,7 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
+       return was_pending;
+ }
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+@@ -1822,7 +1822,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
+        * at the cpu.
+        */
+       if (!(v & (1 << (i & 0x1f)))) {
+-              atomic_inc(&irq_mis_count);
++              atomic_inc_unchecked(&irq_mis_count);
+               eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
+       }
+@@ -1868,7 +1868,7 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
+       return ret;
+ }
+-static struct irq_chip ioapic_chip __read_mostly = {
++static struct irq_chip ioapic_chip = {
+       .name                   = "IO-APIC",
+       .irq_startup            = startup_ioapic_irq,
+       .irq_mask               = mask_ioapic_irq,
+@@ -1879,7 +1879,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+ };
+-static struct irq_chip ioapic_ir_chip __read_mostly = {
++static struct irq_chip ioapic_ir_chip = {
+       .name                   = "IR-IO-APIC",
+       .irq_startup            = startup_ioapic_irq,
+       .irq_mask               = mask_ioapic_irq,
+@@ -1937,7 +1937,7 @@ static void ack_lapic_irq(struct irq_data *data)
+       ack_APIC_irq();
+ }
+-static struct irq_chip lapic_chip __read_mostly = {
++static struct irq_chip lapic_chip = {
+       .name           = "local-APIC",
+       .irq_mask       = mask_lapic_irq,
+       .irq_unmask     = unmask_lapic_irq,
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index ade2532..5fc7f4f9 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -269,7 +269,7 @@ static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+       hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
+ }
+-static struct irq_chip hpet_msi_controller = {
++static irq_chip_no_const hpet_msi_controller __read_only = {
+       .name = "HPET-MSI",
+       .irq_unmask = hpet_msi_unmask,
+       .irq_mask = hpet_msi_mask,
+diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
+index 5630962..0ed042c 100644
+--- a/arch/x86/kernel/apic/probe_32.c
++++ b/arch/x86/kernel/apic/probe_32.c
+@@ -72,7 +72,7 @@ static int probe_default(void)
+       return 1;
+ }
+-static struct apic apic_default = {
++static struct apic apic_default __read_only = {
+       .name                           = "default",
+       .probe                          = probe_default,
+@@ -126,7 +126,7 @@ static struct apic apic_default = {
+ apic_driver(apic_default);
+-struct apic *apic = &apic_default;
++struct apic *apic __read_only = &apic_default;
+ EXPORT_SYMBOL_GPL(apic);
+ static int cmdline_apic __initdata;
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 5d30c5e..3c83cc4 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -37,6 +37,7 @@ static struct irq_chip lapic_controller;
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+ #endif
++void lock_vector_lock(void) __acquires(&vector_lock);
+ void lock_vector_lock(void)
+ {
+       /* Used to the online set of cpus does not change
+@@ -45,6 +46,7 @@ void lock_vector_lock(void)
+       raw_spin_lock(&vector_lock);
+ }
++void unlock_vector_lock(void) __releases(&vector_lock);
+ void unlock_vector_lock(void)
+ {
+       raw_spin_unlock(&vector_lock);
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 54f35d9..d752bd5 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -227,7 +227,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
+               cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
+ }
+-static struct apic apic_x2apic_cluster = {
++static struct apic apic_x2apic_cluster __read_only = {
+       .name                           = "cluster x2apic",
+       .probe                          = x2apic_cluster_probe,
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index 4f13f54f..96e4431 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -98,7 +98,7 @@ static int x2apic_phys_probe(void)
+       return apic == &apic_x2apic_phys;
+ }
+-static struct apic apic_x2apic_phys = {
++static struct apic apic_x2apic_phys __read_only = {
+       .name                           = "physical x2apic",
+       .probe                          = x2apic_phys_probe,
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index cb0673c..dc976d7 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -560,7 +560,7 @@ static int uv_probe(void)
+       return apic == &apic_x2apic_uv_x;
+ }
+-static struct apic __refdata apic_x2apic_uv_x = {
++static struct apic apic_x2apic_uv_x __read_only = {
+       .name                           = "UV large system",
+       .probe                          = uv_probe,
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index c7364bd..20cd21a 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
+  * This is for buggy BIOS's that refer to (real mode) segment 0x40
+  * even though they are called in protected mode.
+  */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+                       (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+ static const char driver_version[] = "1.16ac";        /* no spaces */
+@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
+       BUG_ON(cpu != 0);
+       gdt = get_cpu_gdt_table(cpu);
+       save_desc_40 = gdt[0x40 / 8];
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = bad_bios_desc;
++      pax_close_kernel();
+       apm_irq_save(flags);
+       APM_DO_SAVE_SEGS;
+@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
+                         &call->esi);
+       APM_DO_RESTORE_SEGS;
+       apm_irq_restore(flags);
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = save_desc_40;
++      pax_close_kernel();
++
+       put_cpu();
+       return call->eax & 0xff;
+@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
+       BUG_ON(cpu != 0);
+       gdt = get_cpu_gdt_table(cpu);
+       save_desc_40 = gdt[0x40 / 8];
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = bad_bios_desc;
++      pax_close_kernel();
+       apm_irq_save(flags);
+       APM_DO_SAVE_SEGS;
+@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
+                                        &call->eax);
+       APM_DO_RESTORE_SEGS;
+       apm_irq_restore(flags);
++
++      pax_open_kernel();
+       gdt[0x40 / 8] = save_desc_40;
++      pax_close_kernel();
++
+       put_cpu();
+       return error;
+ }
+@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id __initdata apm_dmi_table[] = {
++static const struct dmi_system_id __initconst apm_dmi_table[] = {
+       {
+               print_if_true,
+               KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
+@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
+        * code to that CPU.
+        */
+       gdt = get_cpu_gdt_table(0);
++
++      pax_open_kernel();
+       set_desc_base(&gdt[APM_CS >> 3],
+                (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+       set_desc_base(&gdt[APM_CS_16 >> 3],
+                (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+       set_desc_base(&gdt[APM_DS >> 3],
+                (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++      pax_close_kernel();
+       proc_create("apm", 0, NULL, &apm_file_ops);
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 2bd5c6f..4907fd0 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -31,9 +31,11 @@ void common(void) {
+       BLANK();
+       OFFSET(TI_flags, thread_info, flags);
+       OFFSET(TI_status, thread_info, status);
++      OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++      DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+       BLANK();
+-      OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
++      OFFSET(TASK_addr_limit, task_struct, tinfo.addr_limit);
+       BLANK();
+       OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -68,8 +70,26 @@ void common(void) {
+       OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+       OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+       OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++      OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+ #endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++      OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++#ifdef CONFIG_X86_64
++      OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
++#endif
++#endif
++
++#endif
++
++      BLANK();
++      DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++      DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++      DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
++
+ #ifdef CONFIG_XEN
+       BLANK();
+       OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+@@ -88,4 +108,5 @@ void common(void) {
+       BLANK();
+       DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
++      DEFINE(TSS_size, sizeof(struct tss_struct));
+ }
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 4a8697f..8a13428 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -12,10 +12,6 @@ endif
+ KCOV_INSTRUMENT_common.o := n
+ KCOV_INSTRUMENT_perf_event.o := n
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o               := $(nostackp)
+-
+ obj-y                 := intel_cacheinfo.o scattered.o topology.o
+ obj-y                 += common.o
+ obj-y                 += rdrand.o
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index b81fe2d..fa46eca 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -792,7 +792,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ {
+       /* AMD errata T13 (order #21922) */
+-      if ((c->x86 == 6)) {
++      if (c->x86 == 6) {
+               /* Duron Rev A0 */
+               if (c->x86_model == 3 && c->x86_mask == 0)
+                       size = 64;
+diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
+index a972ac4..938c163 100644
+--- a/arch/x86/kernel/cpu/bugs_64.c
++++ b/arch/x86/kernel/cpu/bugs_64.c
+@@ -10,6 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/mtrr.h>
+ #include <asm/cacheflush.h>
++#include <asm/sections.h>
+ void __init check_bugs(void)
+ {
+@@ -18,6 +19,7 @@ void __init check_bugs(void)
+       pr_info("CPU: ");
+       print_cpu_info(&boot_cpu_data);
+ #endif
++      set_memory_nx((unsigned long)_sinitdata, (__START_KERNEL_map + KERNEL_IMAGE_SIZE - (unsigned long)_sinitdata) >> PAGE_SHIFT);
+       alternative_instructions();
+       /*
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index bcc9ccc..84b8a82 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -93,60 +93,6 @@ static const struct cpu_dev default_cpu = {
+ static const struct cpu_dev *this_cpu = &default_cpu;
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+-      /*
+-       * We need valid kernel segments for data and code in long mode too
+-       * IRET will check the segment types  kkeil 2000/10/28
+-       * Also sysret mandates a special GDT layout
+-       *
+-       * TLS descriptors are currently at a different place compared to i386.
+-       * Hopefully nobody expects them at a fixed place (Wine?)
+-       */
+-      [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+-      [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+-      [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+-      [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+-      [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+-      [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+-      /*
+-       * Segments used for calling PnP BIOS have byte granularity.
+-       * They code segments and data segments have fixed 64k limits,
+-       * the transfer segment sizes are set at run time.
+-       */
+-      /* 32-bit code */
+-      [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+-      /* 16-bit code */
+-      [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+-      /* 16-bit data */
+-      [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+-      /* 16-bit data */
+-      [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
+-      /* 16-bit data */
+-      [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
+-      /*
+-       * The APM segments have byte granularity and their bases
+-       * are set at run time.  All have 64k limits.
+-       */
+-      /* 32-bit code */
+-      [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+-      /* 16-bit code */
+-      [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+-      /* data */
+-      [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+-      [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+-      [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+-      GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_mpx_setup(char *s)
+ {
+       /* require an exact match without trailing characters */
+@@ -281,6 +227,10 @@ static __always_inline void setup_smep(struct cpuinfo_x86 *c)
+ {
+       if (cpu_has(c, X86_FEATURE_SMEP))
+               cr4_set_bits(X86_CR4_SMEP);
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_NONE
++      else
++              panic("PAX: this KERNEXEC configuration requires SMEP support\n");
++#endif
+ }
+ static __init int setup_disable_smap(char *arg)
+@@ -306,6 +256,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+       }
+ }
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#ifdef CONFIG_X86_64
++static bool uderef_enabled __read_only = true;
++unsigned long pax_user_shadow_base __read_only;
++EXPORT_SYMBOL(pax_user_shadow_base);
++extern char pax_enter_kernel_user[];
++extern char pax_exit_kernel_user[];
++
++static int __init setup_pax_weakuderef(char *str)
++{
++      if (uderef_enabled)
++              pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++      return 1;
++}
++__setup("pax_weakuderef", setup_pax_weakuderef);
++#endif
++
++static int __init setup_pax_nouderef(char *str)
++{
++#ifdef CONFIG_X86_32
++      unsigned int cpu;
++      struct desc_struct *gdt;
++
++      for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++              gdt = get_cpu_gdt_table(cpu);
++              gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++              gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++              gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
++              gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
++      }
++      loadsegment(ds, __KERNEL_DS);
++      loadsegment(es, __KERNEL_DS);
++      loadsegment(ss, __KERNEL_DS);
++#else
++      memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
++      memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
++      clone_pgd_mask = ~(pgdval_t)0UL;
++      pax_user_shadow_base = 0UL;
++      setup_clear_cpu_cap(X86_FEATURE_PCIDUDEREF);
++      uderef_enabled = false;
++#endif
++
++      return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++#endif
++
++#ifdef CONFIG_X86_64
++static __init int setup_disable_pcid(char *arg)
++{
++      setup_clear_cpu_cap(X86_FEATURE_PCID);
++      setup_clear_cpu_cap(X86_FEATURE_INVPCID);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (uderef_enabled)
++              pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++#endif
++
++      return 1;
++}
++__setup("nopcid", setup_disable_pcid);
++
++static void setup_pcid(struct cpuinfo_x86 *c)
++{
++      if (cpu_has(c, X86_FEATURE_PCID)) {
++              printk("PAX: PCID detected\n");
++              cr4_set_bits(X86_CR4_PCIDE);
++      } else
++              clear_cpu_cap(c, X86_FEATURE_INVPCID);
++
++      if (cpu_has(c, X86_FEATURE_INVPCID))
++              printk("PAX: INVPCID detected\n");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      if (!uderef_enabled) {
++              printk("PAX: UDEREF disabled\n");
++              return;
++      }
++
++      if (!cpu_has(c, X86_FEATURE_PCID)) {
++              pax_open_kernel();
++              pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++              pax_close_kernel();
++              printk("PAX: slow and weak UDEREF enabled\n");
++              return;
++      }
++
++      set_cpu_cap(c, X86_FEATURE_PCIDUDEREF);
++
++      pax_open_kernel();
++      clone_pgd_mask = ~(pgdval_t)0UL;
++      pax_close_kernel();
++      if (pax_user_shadow_base)
++              printk("PAX: weak UDEREF enabled\n");
++      else {
++              set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
++              printk("PAX: strong UDEREF enabled\n");
++      }
++#endif
++
++}
++#endif
++
+ /*
+  * Protection Keys are not available in 32-bit mode.
+  */
+@@ -451,7 +504,7 @@ void switch_to_new_gdt(int cpu)
+ {
+       struct desc_ptr gdt_descr;
+-      gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++      gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
+       /* Reload the per-cpu base */
+@@ -972,9 +1025,11 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
+        * in case CQM bits really aren't there in this CPU.
+        */
+       if (c != &boot_cpu_data) {
++              pax_open_kernel();
+               boot_cpu_data.x86_cache_max_rmid =
+                       min(boot_cpu_data.x86_cache_max_rmid,
+                           c->x86_cache_max_rmid);
++              pax_close_kernel();
+       }
+ }
+@@ -1041,6 +1096,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+       setup_smep(c);
+       setup_smap(c);
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(__supported_pte_mask & _PAGE_NX))
++              clear_cpu_cap(c, X86_FEATURE_PSE);
++#endif
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      clear_cpu_cap(c, X86_FEATURE_SEP);
++#endif
++#endif
++
++#ifdef CONFIG_X86_64
++      setup_pcid(c);
++#endif
++
+       /*
+        * The vendor-specific functions might have changed features.
+        * Now we do "generic changes."
+@@ -1086,10 +1155,14 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+        * executed, c == &boot_cpu_data.
+        */
+       if (c != &boot_cpu_data) {
++              pax_open_kernel();
++
+               /* AND the already accumulated flags with these */
+               for (i = 0; i < NCAPINTS; i++)
+                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++              pax_close_kernel();
++
+               /* OR, i.e. replicate the bug flags */
+               for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
+                       c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
+@@ -1121,7 +1194,7 @@ void enable_sep_cpu(void)
+               return;
+       cpu = get_cpu();
+-      tss = &per_cpu(cpu_tss, cpu);
++      tss = cpu_tss + cpu;
+       /*
+        * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
+@@ -1263,10 +1336,12 @@ static __init int setup_disablecpuid(char *arg)
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
+-                                  (unsigned long) debug_idt_table };
++struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
++const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
+ DEFINE_PER_CPU_FIRST(union irq_stack_union,
+                    irq_stack_union) __aligned(PAGE_SIZE) __visible;
+@@ -1378,21 +1453,21 @@ EXPORT_PER_CPU_SYMBOL(current_task);
+ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+ EXPORT_PER_CPU_SYMBOL(__preempt_count);
++#ifdef CONFIG_CC_STACKPROTECTOR
++DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
++#endif
++
++#endif        /* CONFIG_X86_64 */
++
+ /*
+  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
+  * the top of the kernel stack.  Use an extra percpu variable to track the
+  * top of the kernel stack directly.
+  */
+ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
+-      (unsigned long)&init_thread_union + THREAD_SIZE;
++      (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
+-#ifdef CONFIG_CC_STACKPROTECTOR
+-DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+-#endif
+-
+-#endif        /* CONFIG_X86_64 */
+-
+ /*
+  * Clear all 6 debug registers:
+  */
+@@ -1468,7 +1543,7 @@ void cpu_init(void)
+        */
+       load_ucode_ap();
+-      t = &per_cpu(cpu_tss, cpu);
++      t = cpu_tss + cpu;
+       oist = &per_cpu(orig_ist, cpu);
+ #ifdef CONFIG_NUMA
+@@ -1500,7 +1575,6 @@ void cpu_init(void)
+       wrmsrl(MSR_KERNEL_GS_BASE, 0);
+       barrier();
+-      x86_configure_nx();
+       x2apic_setup();
+       /*
+@@ -1552,7 +1626,7 @@ void cpu_init(void)
+ {
+       int cpu = smp_processor_id();
+       struct task_struct *curr = current;
+-      struct tss_struct *t = &per_cpu(cpu_tss, cpu);
++      struct tss_struct *t = cpu_tss + cpu;
+       struct thread_struct *thread = &curr->thread;
+       wait_for_master_cpu(cpu);
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index de6626c..c84e8c1 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -519,25 +519,23 @@ cache_private_attrs_is_visible(struct kobject *kobj,
+       return 0;
+ }
++static struct attribute *amd_l3_attrs[4];
++
+ static struct attribute_group cache_private_group = {
+       .is_visible = cache_private_attrs_is_visible,
++      .attrs = amd_l3_attrs,
+ };
+ static void init_amd_l3_attrs(void)
+ {
+       int n = 1;
+-      static struct attribute **amd_l3_attrs;
+-
+-      if (amd_l3_attrs) /* already initialized */
+-              return;
+       if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+               n += 2;
+       if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               n += 1;
+-      amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
+-      if (!amd_l3_attrs)
++      if (n > 1 && amd_l3_attrs[0]) /* already initialized */
+               return;
+       n = 0;
+@@ -547,8 +545,6 @@ static void init_amd_l3_attrs(void)
+       }
+       if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+               amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
+-
+-      cache_private_group.attrs = amd_l3_attrs;
+ }
+ const struct attribute_group *
+@@ -559,7 +555,7 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
+       if (this_leaf->level < 3 || !nb)
+               return NULL;
+-      if (nb && nb->l3_cache.indices)
++      if (nb->l3_cache.indices)
+               init_amd_l3_attrs();
+       return &cache_private_group;
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 79d8ec8..ba9ae33 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -47,6 +47,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+ #include "mce-internal.h"
+@@ -209,8 +210,7 @@ static struct notifier_block mce_srao_nb;
+ void mce_register_decode_chain(struct notifier_block *nb)
+ {
+       /* Ensure SRAO notifier has the highest priority in the decode chain. */
+-      if (nb != &mce_srao_nb && nb->priority == INT_MAX)
+-              nb->priority -= 1;
++      BUG_ON(nb != &mce_srao_nb && nb->priority == INT_MAX);
+       atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+ }
+@@ -262,7 +262,7 @@ static inline u32 smca_misc_reg(int bank)
+       return MSR_AMD64_SMCA_MCx_MISC(bank);
+ }
+-struct mca_msr_regs msr_ops = {
++struct mca_msr_regs msr_ops __read_only = {
+       .ctl    = ctl_reg,
+       .status = status_reg,
+       .addr   = addr_reg,
+@@ -281,7 +281,7 @@ static void print_mce(struct mce *m)
+                       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+                               m->cs, m->ip);
+-              if (m->cs == __KERNEL_CS)
++              if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+                       print_symbol("{%s}", m->ip);
+               pr_cont("\n");
+       }
+@@ -314,10 +314,10 @@ static void print_mce(struct mce *m)
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+-static atomic_t mce_panicked;
++static atomic_unchecked_t mce_panicked;
+ static int fake_panic;
+-static atomic_t mce_fake_panicked;
++static atomic_unchecked_t mce_fake_panicked;
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -343,7 +343,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+               /*
+                * Make sure only one CPU runs in machine check panic
+                */
+-              if (atomic_inc_return(&mce_panicked) > 1)
++              if (atomic_inc_return_unchecked(&mce_panicked) > 1)
+                       wait_for_panic();
+               barrier();
+@@ -351,7 +351,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+               console_verbose();
+       } else {
+               /* Don't log too much for fake panic */
+-              if (atomic_inc_return(&mce_fake_panicked) > 1)
++              if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
+                       return;
+       }
+       pending = mce_gen_pool_prepare_records();
+@@ -387,7 +387,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+       if (!fake_panic) {
+               if (panic_timeout == 0)
+                       panic_timeout = mca_cfg.panic_timeout;
+-              panic(msg);
++              panic("%s", msg);
+       } else
+               pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
+ }
+@@ -761,7 +761,7 @@ static int mce_timed_out(u64 *t, const char *msg)
+        * might have been modified by someone else.
+        */
+       rmb();
+-      if (atomic_read(&mce_panicked))
++      if (atomic_read_unchecked(&mce_panicked))
+               wait_for_panic();
+       if (!mca_cfg.monarch_timeout)
+               goto out;
+@@ -1691,10 +1691,12 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+                * Install proper ops for Scalable MCA enabled processors
+                */
+               if (mce_flags.smca) {
++                      pax_open_kernel();
+                       msr_ops.ctl     = smca_ctl_reg;
+                       msr_ops.status  = smca_status_reg;
+                       msr_ops.addr    = smca_addr_reg;
+                       msr_ops.misc    = smca_misc_reg;
++                      pax_close_kernel();
+               }
+               mce_amd_feature_init(c);
+@@ -1747,7 +1749,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ }
+ /* Call the installed machine check handler for this CPU setup. */
+-void (*machine_check_vector)(struct pt_regs *, long error_code) =
++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
+                                               unexpected_machine_check;
+ /*
+@@ -1776,7 +1778,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
+               return;
+       }
++      pax_open_kernel();
+       machine_check_vector = do_machine_check;
++      pax_close_kernel();
+       __mcheck_cpu_init_generic();
+       __mcheck_cpu_init_vendor(c);
+@@ -1808,7 +1812,7 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
+  */
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count;     /* #times opened */
++static local_t mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu;     /* already open exclusive? */
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1816,7 +1820,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+       spin_lock(&mce_chrdev_state_lock);
+       if (mce_chrdev_open_exclu ||
+-          (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
++          (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
+               spin_unlock(&mce_chrdev_state_lock);
+               return -EBUSY;
+@@ -1824,7 +1828,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+       if (file->f_flags & O_EXCL)
+               mce_chrdev_open_exclu = 1;
+-      mce_chrdev_open_count++;
++      local_inc(&mce_chrdev_open_count);
+       spin_unlock(&mce_chrdev_state_lock);
+@@ -1835,7 +1839,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+ {
+       spin_lock(&mce_chrdev_state_lock);
+-      mce_chrdev_open_count--;
++      local_dec(&mce_chrdev_open_count);
+       mce_chrdev_open_exclu = 0;
+       spin_unlock(&mce_chrdev_state_lock);
+@@ -2529,7 +2533,7 @@ static __init void mce_init_banks(void)
+       for (i = 0; i < mca_cfg.banks; i++) {
+               struct mce_bank *b = &mce_banks[i];
+-              struct device_attribute *a = &b->attr;
++              device_attribute_no_const *a = &b->attr;
+               sysfs_attr_init(&a->attr);
+               a->attr.name    = b->attrname;
+@@ -2636,7 +2640,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+       cpu_missing = 0;
+-      atomic_set(&mce_fake_panicked, 0);
++      atomic_set_unchecked(&mce_fake_panicked, 0);
+       atomic_set(&mce_executing, 0);
+       atomic_set(&mce_callin, 0);
+       atomic_set(&global_nwo, 0);
+diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
+index 2a0717b..7fbc641 100644
+--- a/arch/x86/kernel/cpu/mcheck/p5.c
++++ b/arch/x86/kernel/cpu/mcheck/p5.c
+@@ -12,6 +12,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+ /* By default disabled */
+ int mce_p5_enabled __read_mostly;
+@@ -52,7 +53,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+       if (!cpu_has(c, X86_FEATURE_MCE))
+               return;
++      pax_open_kernel();
+       machine_check_vector = pentium_machine_check;
++      pax_close_kernel();
+       /* Make sure the vector pointer is visible before we enable MCEs: */
+       wmb();
+diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
+index c6a722e..4016140 100644
+--- a/arch/x86/kernel/cpu/mcheck/winchip.c
++++ b/arch/x86/kernel/cpu/mcheck/winchip.c
+@@ -11,6 +11,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+ /* Machine check handler for WinChip C6: */
+ static void winchip_machine_check(struct pt_regs *regs, long error_code)
+@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
+ {
+       u32 lo, hi;
++      pax_open_kernel();
+       machine_check_vector = winchip_machine_check;
++      pax_close_kernel();
+       /* Make sure the vector pointer is visible before we enable MCEs: */
+       wmb();
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index cdc0dea..ada8a20 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -1072,13 +1072,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+-      return copy_from_user(to, from, n);
++      return copy_from_user(to, (const void __force_user *)from, n);
+ }
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+-      return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++      return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
+ }
+ static void microcode_fini_cpu(int cpu)
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 8f44c5a..ed71f8c 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -206,7 +206,7 @@ static void __init ms_hyperv_init_platform(void)
+               x86_platform.get_nmi_reason = hv_get_nmi_reason;
+ }
+-const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
++const struct hypervisor_x86 x86_hyper_ms_hyperv = {
+       .name                   = "Microsoft HyperV",
+       .detect                 = ms_hyperv_platform,
+       .init_platform          = ms_hyperv_init_platform,
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index fdc5521..d31149c 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -726,7 +726,8 @@ static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
+  * The caller must ensure that local interrupts are disabled and
+  * are reenabled after post_set() has been called.
+  */
+-static void prepare_set(void) __acquires(set_atomicity_lock)
++static void prepare_set(void) __acquires(&set_atomicity_lock);
++static void prepare_set(void)
+ {
+       unsigned long cr0;
+@@ -762,7 +763,8 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
+       wbinvd();
+ }
+-static void post_set(void) __releases(set_atomicity_lock)
++static void post_set(void) __releases(&set_atomicity_lock);
++static void post_set(void)
+ {
+       /* Flush TLBs (no need to flush caches - they are disabled) */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 28f1b54..1004b6d 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -72,7 +72,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+ const struct mtrr_ops *mtrr_if;
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index 6c7ced0..55ee554 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -25,7 +25,7 @@ struct mtrr_ops {
+       int     (*validate_add_page)(unsigned long base, unsigned long size,
+                                    unsigned int type);
+       int     (*have_wrcomb)(void);
+-};
++} __do_const;
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+                                  int replace_reg);
+diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
+index 1ff0598..5ef5631 100644
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -137,7 +137,7 @@ static bool __init vmware_legacy_x2apic_available(void)
+              (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
+ }
+-const __refconst struct hypervisor_x86 x86_hyper_vmware = {
++const struct hypervisor_x86 x86_hyper_vmware = {
+       .name                   = "VMware",
+       .detect                 = vmware_platform,
+       .set_cpu_features       = vmware_set_cpu_features,
+diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
+index afa64ad..dce67dd 100644
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+               return -ENOMEM;
+       if (userbuf) {
+-              if (copy_to_user(buf, vaddr + offset, csize)) {
++              if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
+                       iounmap(vaddr);
+                       return -EFAULT;
+               }
+diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
+index f6dfd93..892ade4 100644
+--- a/arch/x86/kernel/doublefault.c
++++ b/arch/x86/kernel/doublefault.c
+@@ -12,7 +12,7 @@
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+@@ -22,7 +22,7 @@ static void doublefault_fn(void)
+       unsigned long gdt, tss;
+       native_store_gdt(&gdt_desc);
+-      gdt = gdt_desc.address;
++      gdt = (unsigned long)gdt_desc.address;
+       printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
+               /* 0x2 bit is always set */
+               .flags          = X86_EFLAGS_SF | 0x2,
+               .sp             = STACK_START,
+-              .es             = __USER_DS,
++              .es             = __KERNEL_DS,
+               .cs             = __KERNEL_CS,
+               .ss             = __KERNEL_DS,
+-              .ds             = __USER_DS,
++              .ds             = __KERNEL_DS,
+               .fs             = __KERNEL_PERCPU,
+               .__cr3          = __pa_nodebug(swapper_pg_dir),
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 92e8f0a..a2430f0 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -2,6 +2,9 @@
+  *  Copyright (C) 1991, 1992  Linus Torvalds
+  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -35,7 +38,7 @@ static void printk_stack_address(unsigned long address, int reliable,
+ void printk_address(unsigned long address)
+ {
+-      pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
++      pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
+ }
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+@@ -77,10 +80,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+  */
+-static inline int valid_stack_ptr(struct task_struct *task,
+-                      void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+-      void *t = task_stack_page(task);
+       if (end) {
+               if (p < end && p >= (end-THREAD_SIZE))
+                       return 1;
+@@ -91,7 +92,7 @@ static inline int valid_stack_ptr(struct task_struct *task,
+ }
+ unsigned long
+-print_context_stack(struct task_struct *task,
++print_context_stack(struct task_struct *task, void *stack_start,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data,
+               unsigned long *end, int *graph)
+@@ -106,7 +107,7 @@ print_context_stack(struct task_struct *task,
+           PAGE_SIZE)
+               stack = (unsigned long *)task_stack_page(task);
+-      while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
++      while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+               unsigned long addr;
+               addr = *stack;
+@@ -127,7 +128,7 @@ print_context_stack(struct task_struct *task,
+ EXPORT_SYMBOL_GPL(print_context_stack);
+ unsigned long
+-print_context_stack_bp(struct task_struct *task,
++print_context_stack_bp(struct task_struct *task, void *stack_start,
+                      unsigned long *stack, unsigned long bp,
+                      const struct stacktrace_ops *ops, void *data,
+                      unsigned long *end, int *graph)
+@@ -135,7 +136,7 @@ print_context_stack_bp(struct task_struct *task,
+       struct stack_frame *frame = (struct stack_frame *)bp;
+       unsigned long *ret_addr = &frame->return_address;
+-      while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
++      while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
+               unsigned long addr = *ret_addr;
+               if (!__kernel_text_address(addr))
+@@ -240,6 +241,7 @@ EXPORT_SYMBOL_GPL(oops_begin);
+ NOKPROBE_SYMBOL(oops_begin);
+ void __noreturn rewind_stack_do_exit(int signr);
++extern void gr_handle_kernel_exploit(void);
+ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+@@ -263,6 +265,8 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+       if (panic_on_oops)
+               panic("Fatal exception");
++      gr_handle_kernel_exploit();
++
+       /*
+        * We're not going to return, but we might be on an IST stack or
+        * have very little stack space left.  Rewind the stack and kill
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index 0967571..84666bc 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -15,6 +15,7 @@
+ #include <linux/nmi.h>
+ #include <asm/stacktrace.h>
++#include <asm/desc.h>
+ static void *is_irq_stack(void *p, void *irq)
+ {
+@@ -61,13 +62,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+               bp = stack_frame(task, regs);
+       for (;;) {
++              void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+               void *end_stack;
+               end_stack = is_hardirq_stack(stack, cpu);
+               if (!end_stack)
+                       end_stack = is_softirq_stack(stack, cpu);
+-              bp = ops->walk_stack(task, stack, bp, ops, data,
++              bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
+                                    end_stack, &graph);
+               /* Stop if not on irq stack */
+@@ -137,16 +139,17 @@ void show_regs(struct pt_regs *regs)
+               unsigned int code_len = code_bytes;
+               unsigned char c;
+               u8 *ip;
++              unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
+               pr_emerg("Stack:\n");
+               show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+               pr_emerg("Code:");
+-              ip = (u8 *)regs->ip - code_prologue;
++              ip = (u8 *)regs->ip - code_prologue + cs_base;
+               if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+                       /* try starting at IP */
+-                      ip = (u8 *)regs->ip;
++                      ip = (u8 *)regs->ip + cs_base;
+                       code_len = code_len - code_prologue + 1;
+               }
+               for (i = 0; i < code_len; i++, ip++) {
+@@ -155,7 +158,7 @@ void show_regs(struct pt_regs *regs)
+                               pr_cont("  Bad EIP value.");
+                               break;
+                       }
+-                      if (ip == (u8 *)regs->ip)
++                      if (ip == (u8 *)regs->ip + cs_base)
+                               pr_cont(" <%02x>", c);
+                       else
+                               pr_cont(" %02x", c);
+@@ -168,6 +171,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+       unsigned short ud2;
++      ip = ktla_ktva(ip);
+       if (ip < PAGE_OFFSET)
+               return 0;
+       if (probe_kernel_address((unsigned short *)ip, ud2))
+@@ -175,3 +179,15 @@ int is_valid_bugaddr(unsigned long ip)
+       return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void __used pax_check_alloca(unsigned long size)
++{
++      unsigned long sp = (unsigned long)&sp, stack_left;
++
++      /* all kernel stacks are of the same size */
++      stack_left = sp & (THREAD_SIZE - 1);
++      BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 9ee4520..bacb90c 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -158,6 +158,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+       unsigned used = 0;
+       int graph = 0;
+       int done = 0;
++      void *stack_start;
+       if (!task)
+               task = current;
+@@ -190,17 +191,19 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+               done = 1;
+               switch (stype) {
+-
+-              /* Break out early if we are on the thread stack */
+               case STACK_IS_NORMAL:
++                      /*
++                       * This handles the process stack:
++                       */
++                      stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++                      bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+                       break;
+               case STACK_IS_EXCEPTION:
+-
+                       if (ops->stack(data, id) < 0)
+                               break;
+-                      bp = ops->walk_stack(task, stack, bp, ops,
++                      bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
+                                            data, stack_end, &graph);
+                       ops->stack(data, "<EOE>");
+                       /*
+@@ -208,15 +211,16 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+                        * second-to-last pointer (index -2 to end) in the
+                        * exception stack:
+                        */
++                      if ((u16)stack_end[-1] != __KERNEL_DS)
++                              goto out;
+                       stack = (unsigned long *) stack_end[-2];
+                       done = 0;
+                       break;
+               case STACK_IS_IRQ:
+-
+                       if (ops->stack(data, "IRQ") < 0)
+                               break;
+-                      bp = ops->walk_stack(task, stack, bp,
++                      bp = ops->walk_stack(task, irq_stack, stack, bp,
+                                    ops, data, stack_end, &graph);
+                       /*
+                        * We link to the next stack (which would be
+@@ -235,10 +239,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+               }
+       }
+-      /*
+-       * This handles the process stack:
+-       */
+-      bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
++out:
+       put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -355,8 +356,55 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+       unsigned short ud2;
+-      if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
++      if (probe_kernel_address((unsigned short *)ip, ud2))
+               return 0;
+       return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void __used pax_check_alloca(unsigned long size)
++{
++      unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
++      unsigned cpu, used;
++      char *id;
++
++      /* check the process stack first */
++      stack_start = (unsigned long)task_stack_page(current);
++      stack_end = stack_start + THREAD_SIZE;
++      if (likely(stack_start <= sp && sp < stack_end)) {
++              unsigned long stack_left = sp & (THREAD_SIZE - 1);
++              BUG_ON(stack_left < 256 || size >= stack_left - 256);
++              return;
++      }
++
++      cpu = get_cpu();
++
++      /* check the irq stacks */
++      stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
++      stack_start = stack_end - IRQ_STACK_SIZE;
++      if (stack_start <= sp && sp < stack_end) {
++              unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
++              put_cpu();
++              BUG_ON(stack_left < 256 || size >= stack_left - 256);
++              return;
++      }
++
++      /* check the exception stacks */
++      used = 0;
++      stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
++      stack_start = stack_end - EXCEPTION_STKSZ;
++      if (stack_end && stack_start <= sp && sp < stack_end) {
++              unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
++              put_cpu();
++              BUG_ON(stack_left < 256 || size >= stack_left - 256);
++              return;
++      }
++
++      put_cpu();
++
++      /* unknown stack */
++      BUG();
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 625eb69..e12a513 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
+ static void early_panic(char *msg)
+ {
+-      early_printk(msg);
+-      panic(msg);
++      early_printk("%s", msg);
++      panic("%s", msg);
+ }
+ static int userdef __initdata;
+diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
+index 8a12199..e63bebf 100644
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
+index 04f89ca..43ad7de 100644
+--- a/arch/x86/kernel/espfix_64.c
++++ b/arch/x86/kernel/espfix_64.c
+@@ -41,6 +41,7 @@
+ #include <asm/pgalloc.h>
+ #include <asm/setup.h>
+ #include <asm/espfix.h>
++#include <asm/bug.h>
+ /*
+  * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
+@@ -70,8 +71,10 @@ static DEFINE_MUTEX(espfix_init_mutex);
+ #define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
+ static void *espfix_pages[ESPFIX_MAX_PAGES];
+-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
+-      __aligned(PAGE_SIZE);
++static __page_aligned_rodata pud_t espfix_pud_page[PTRS_PER_PUD];
++static __page_aligned_rodata pmd_t espfix_pmd_page[PTRS_PER_PMD];
++static __page_aligned_rodata pte_t espfix_pte_page[PTRS_PER_PTE];
++static __page_aligned_rodata char espfix_stack_page[ESPFIX_MAX_PAGES][PAGE_SIZE];
+ static unsigned int page_random, slot_random;
+@@ -122,10 +125,19 @@ static void init_espfix_random(void)
+ void __init init_espfix_bsp(void)
+ {
+       pgd_t *pgd_p;
++      pud_t *pud_p;
++      unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
+       /* Install the espfix pud into the kernel page directory */
+-      pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+-      pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
++      pgd_p = &init_level4_pgt[index];
++      pud_p = espfix_pud_page;
++      paravirt_alloc_pud(&init_mm, __pa(pud_p) >> PAGE_SHIFT);
++      set_pgd(pgd_p, __pgd(PGTABLE_PROT | __pa(pud_p)));
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
++      clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
++#endif
+       /* Randomize the locations */
+       init_espfix_random();
+@@ -170,35 +182,39 @@ void init_espfix_ap(int cpu)
+       pud_p = &espfix_pud_page[pud_index(addr)];
+       pud = *pud_p;
+       if (!pud_present(pud)) {
+-              struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+-
+-              pmd_p = (pmd_t *)page_address(page);
++              if (cpu)
++                      pmd_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0));
++              else
++                      pmd_p = espfix_pmd_page;
+               pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
+               paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+               for (n = 0; n < ESPFIX_PUD_CLONES; n++)
+                       set_pud(&pud_p[n], pud);
+-      }
++      } else
++              BUG_ON(!cpu);
+       pmd_p = pmd_offset(&pud, addr);
+       pmd = *pmd_p;
+       if (!pmd_present(pmd)) {
+-              struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+-
+-              pte_p = (pte_t *)page_address(page);
++              if (cpu)
++                      pte_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0));
++              else
++                      pte_p = espfix_pte_page;
+               pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
+               paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+               for (n = 0; n < ESPFIX_PMD_CLONES; n++)
+                       set_pmd(&pmd_p[n], pmd);
+-      }
++      } else
++              BUG_ON(!cpu);
+       pte_p = pte_offset_kernel(&pmd, addr);
+-      stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
++      stack_page = espfix_stack_page[page];
+       pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+       for (n = 0; n < ESPFIX_PTE_CLONES; n++)
+               set_pte(&pte_p[n*PTE_STRIDE], pte);
+       /* Job is done for this CPU and any CPU which shares this page */
+-      ACCESS_ONCE(espfix_pages[page]) = stack_page;
++      ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
+ unlock_done:
+       mutex_unlock(&espfix_init_mutex);
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 3fc03a0..37177e4 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -135,7 +135,7 @@ void __kernel_fpu_end(void)
+       struct fpu *fpu = &current->thread.fpu;
+       if (fpu->fpregs_active)
+-              copy_kernel_to_fpregs(&fpu->state);
++              copy_kernel_to_fpregs(fpu->state);
+       else
+               __fpregs_deactivate_hw();
+@@ -200,7 +200,7 @@ void fpu__save(struct fpu *fpu)
+       if (fpu->fpregs_active) {
+               if (!copy_fpregs_to_fpstate(fpu)) {
+                       if (use_eager_fpu())
+-                              copy_kernel_to_fpregs(&fpu->state);
++                              copy_kernel_to_fpregs(fpu->state);
+                       else
+                               fpregs_deactivate(fpu);
+               }
+@@ -260,7 +260,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+        * leak into the child task:
+        */
+       if (use_eager_fpu())
+-              memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
++              memset(&dst_fpu->state->xsave, 0, fpu_kernel_xstate_size);
+       /*
+        * Save current FPU registers directly into the child
+@@ -279,11 +279,10 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+        */
+       preempt_disable();
+       if (!copy_fpregs_to_fpstate(dst_fpu)) {
+-              memcpy(&src_fpu->state, &dst_fpu->state,
+-                     fpu_kernel_xstate_size);
++              memcpy(src_fpu->state, dst_fpu->state, fpu_kernel_xstate_size);
+               if (use_eager_fpu())
+-                      copy_kernel_to_fpregs(&src_fpu->state);
++                      copy_kernel_to_fpregs(src_fpu->state);
+               else
+                       fpregs_deactivate(src_fpu);
+       }
+@@ -304,7 +303,7 @@ void fpu__activate_curr(struct fpu *fpu)
+       WARN_ON_FPU(fpu != &current->thread.fpu);
+       if (!fpu->fpstate_active) {
+-              fpstate_init(&fpu->state);
++              fpstate_init(fpu->state);
+               trace_x86_fpu_init_state(fpu);
+               trace_x86_fpu_activate_state(fpu);
+@@ -332,7 +331,7 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
+               fpu__save(fpu);
+       } else {
+               if (!fpu->fpstate_active) {
+-                      fpstate_init(&fpu->state);
++                      fpstate_init(fpu->state);
+                       trace_x86_fpu_init_state(fpu);
+                       trace_x86_fpu_activate_state(fpu);
+@@ -367,7 +366,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
+               /* Invalidate any lazy state: */
+               fpu->last_cpu = -1;
+       } else {
+-              fpstate_init(&fpu->state);
++              fpstate_init(fpu->state);
+               trace_x86_fpu_init_state(fpu);
+               trace_x86_fpu_activate_state(fpu);
+@@ -430,7 +429,7 @@ void fpu__current_fpstate_write_end(void)
+        * an XRSTOR if they are active.
+        */
+       if (fpregs_active())
+-              copy_kernel_to_fpregs(&fpu->state);
++              copy_kernel_to_fpregs(fpu->state);
+       /*
+        * Our update is done and the fpregs/fpstate are in sync
+@@ -457,7 +456,7 @@ void fpu__restore(struct fpu *fpu)
+       kernel_fpu_disable();
+       trace_x86_fpu_before_restore(fpu);
+       fpregs_activate(fpu);
+-      copy_kernel_to_fpregs(&fpu->state);
++      copy_kernel_to_fpregs(fpu->state);
+       fpu->counter++;
+       trace_x86_fpu_after_restore(fpu);
+       kernel_fpu_enable();
+@@ -550,11 +549,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
+                * fully reproduce the context of the exception.
+                */
+               if (boot_cpu_has(X86_FEATURE_FXSR)) {
+-                      cwd = fpu->state.fxsave.cwd;
+-                      swd = fpu->state.fxsave.swd;
++                      cwd = fpu->state->fxsave.cwd;
++                      swd = fpu->state->fxsave.swd;
+               } else {
+-                      cwd = (unsigned short)fpu->state.fsave.cwd;
+-                      swd = (unsigned short)fpu->state.fsave.swd;
++                      cwd = (unsigned short)fpu->state->fsave.cwd;
++                      swd = (unsigned short)fpu->state->fsave.swd;
+               }
+               err = swd & ~cwd;
+@@ -568,7 +567,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
+               unsigned short mxcsr = MXCSR_DEFAULT;
+               if (boot_cpu_has(X86_FEATURE_XMM))
+-                      mxcsr = fpu->state.fxsave.mxcsr;
++                      mxcsr = fpu->state->fxsave.mxcsr;
+               err = ~(mxcsr >> 7) & mxcsr;
+       }
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 93982ae..086162e 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -45,7 +45,7 @@ static void fpu__init_cpu_generic(void)
+       /* Flush out any pending x87 state: */
+ #ifdef CONFIG_MATH_EMULATION
+       if (!boot_cpu_has(X86_FEATURE_FPU))
+-              fpstate_init_soft(&current->thread.fpu.state.soft);
++              fpstate_init_soft(&current->thread.fpu.state->soft);
+       else
+ #endif
+               asm volatile ("fninit");
+@@ -148,51 +148,7 @@ static void __init fpu__init_system_generic(void)
+ unsigned int fpu_kernel_xstate_size;
+ EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
+-/* Get alignment of the TYPE. */
+-#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
+-
+-/*
+- * Enforce that 'MEMBER' is the last field of 'TYPE'.
+- *
+- * Align the computed size with alignment of the TYPE,
+- * because that's how C aligns structs.
+- */
+-#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+-      BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
+-                                         TYPE_ALIGN(TYPE)))
+-
+-/*
+- * We append the 'struct fpu' to the task_struct:
+- */
+-static void __init fpu__init_task_struct_size(void)
+-{
+-      int task_size = sizeof(struct task_struct);
+-
+-      /*
+-       * Subtract off the static size of the register state.
+-       * It potentially has a bunch of padding.
+-       */
+-      task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
+-
+-      /*
+-       * Add back the dynamically-calculated register state
+-       * size.
+-       */
+-      task_size += fpu_kernel_xstate_size;
+-
+-      /*
+-       * We dynamically size 'struct fpu', so we require that
+-       * it be at the end of 'thread_struct' and that
+-       * 'thread_struct' be at the end of 'task_struct'.  If
+-       * you hit a compile error here, check the structure to
+-       * see if something got added to the end.
+-       */
+-      CHECK_MEMBER_AT_END_OF(struct fpu, state);
+-      CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
+-      CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
+-
+-      arch_task_struct_size = task_size;
+-}
++union fpregs_state init_fpregs_state;
+ /*
+  * Set up the user and kernel xstate sizes based on the legacy FPU context size.
+@@ -387,7 +343,6 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
+       fpu__init_system_generic();
+       fpu__init_system_xstate_size_legacy();
+       fpu__init_system_xstate();
+-      fpu__init_task_struct_size();
+       fpu__init_system_ctx_switch();
+ }
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index c114b13..0b0d959 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -41,7 +41,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
+       fpstate_sanitize_xstate(fpu);
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+-                                 &fpu->state.fxsave, 0, -1);
++                                 &fpu->state->fxsave, 0, -1);
+ }
+ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+@@ -58,19 +58,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+       fpstate_sanitize_xstate(fpu);
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-                               &fpu->state.fxsave, 0, -1);
++                               &fpu->state->fxsave, 0, -1);
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+        */
+-      fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
++      fpu->state->fxsave.mxcsr &= mxcsr_feature_mask;
+       /*
+        * update the header bits in the xsave header, indicating the
+        * presence of FP and SSE state.
+        */
+       if (boot_cpu_has(X86_FEATURE_XSAVE))
+-              fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
++              fpu->state->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
+       return ret;
+ }
+@@ -86,7 +86,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
+       if (!boot_cpu_has(X86_FEATURE_XSAVE))
+               return -ENODEV;
+-      xsave = &fpu->state.xsave;
++      xsave = &fpu->state->xsave;
+       fpu__activate_fpstate_read(fpu);
+@@ -126,7 +126,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+       if ((pos != 0) || (count < fpu_user_xstate_size))
+               return -EFAULT;
+-      xsave = &fpu->state.xsave;
++      xsave = &fpu->state->xsave;
+       fpu__activate_fpstate_write(fpu);
+@@ -139,7 +139,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+        * In case of failure, mark all states as init:
+        */
+       if (ret)
+-              fpstate_init(&fpu->state);
++              fpstate_init(fpu->state);
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+@@ -229,7 +229,7 @@ static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
+ void
+ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+ {
+-      struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
++      struct fxregs_state *fxsave = &tsk->thread.fpu.state->fxsave;
+       struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
+       struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
+       int i;
+@@ -267,7 +267,7 @@ void convert_to_fxsr(struct task_struct *tsk,
+                    const struct user_i387_ia32_struct *env)
+ {
+-      struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
++      struct fxregs_state *fxsave = &tsk->thread.fpu.state->fxsave;
+       struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
+       struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
+       int i;
+@@ -305,7 +305,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
+       if (!boot_cpu_has(X86_FEATURE_FXSR))
+               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+-                                         &fpu->state.fsave, 0,
++                                         &fpu->state->fsave, 0,
+                                          -1);
+       fpstate_sanitize_xstate(fpu);
+@@ -336,7 +336,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+       if (!boot_cpu_has(X86_FEATURE_FXSR))
+               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-                                        &fpu->state.fsave, 0,
++                                        &fpu->state->fsave, 0,
+                                         -1);
+       if (pos > 0 || count < sizeof(env))
+@@ -351,7 +351,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+        * presence of FP.
+        */
+       if (boot_cpu_has(X86_FEATURE_XSAVE))
+-              fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
++              fpu->state->xsave.header.xfeatures |= XFEATURE_MASK_FP;
+       return ret;
+ }
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index a184c21..a1731b7 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -56,7 +56,7 @@ static inline int check_for_xstate(struct fxregs_state __user *buf,
+ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
+ {
+       if (use_fxsr()) {
+-              struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
++              struct xregs_state *xsave = &tsk->thread.fpu.state->xsave;
+               struct user_i387_ia32_struct env;
+               struct _fpstate_32 __user *fp = buf;
+@@ -85,19 +85,19 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
+       /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
+       sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
+-      err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
++      err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
+       if (!use_xsave())
+               return err;
+       err |= __put_user(FP_XSTATE_MAGIC2,
+-                        (__u32 *)(buf + fpu_user_xstate_size));
++                        (__u32 __user *)(buf + fpu_user_xstate_size));
+       /*
+        * Read the xfeatures which we copied (directly from the cpu or
+        * from the state in task struct) to the user buffers.
+        */
+-      err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);
++      err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
+       /*
+        * For legacy compatible, we always set FP/SSE bits in the bit
+@@ -112,7 +112,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
+        */
+       xfeatures |= XFEATURE_MASK_FPSSE;
+-      err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
++      err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
+       return err;
+ }
+@@ -121,6 +121,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+ {
+       int err;
++      buf = (struct xregs_state __user *)____m(buf);
+       if (use_xsave())
+               err = copy_xregs_to_user(buf);
+       else if (use_fxsr())
+@@ -155,7 +156,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+  */
+ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
+ {
+-      struct xregs_state *xsave = &current->thread.fpu.state.xsave;
++      struct xregs_state *xsave = &current->thread.fpu.state->xsave;
+       struct task_struct *tsk = current;
+       int ia32_fxstate = (buf != buf_fx);
+@@ -209,7 +210,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+                        struct user_i387_ia32_struct *ia32_env,
+                        u64 xfeatures, int fx_only)
+ {
+-      struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
++      struct xregs_state *xsave = &tsk->thread.fpu.state->xsave;
+       struct xstate_header *header = &xsave->header;
+       if (use_xsave()) {
+@@ -242,6 +243,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+  */
+ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+ {
++      buf = (void __user *)____m(buf);
+       if (use_xsave()) {
+               if ((unsigned long)buf % 64 || fx_only) {
+                       u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+@@ -325,14 +327,14 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+               if (using_compacted_format()) {
+                       err = copyin_to_xsaves(NULL, buf_fx,
+-                                             &fpu->state.xsave);
++                                             &fpu->state->xsave);
+               } else {
+-                      err = __copy_from_user(&fpu->state.xsave,
++                      err = __copy_from_user(&fpu->state->xsave,
+                                              buf_fx, state_size);
+               }
+               if (err || __copy_from_user(&env, buf, sizeof(env))) {
+-                      fpstate_init(&fpu->state);
++                      fpstate_init(fpu->state);
+                       trace_x86_fpu_init_state(fpu);
+                       err = -1;
+               } else {
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 01567aa..4583b36 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -151,14 +151,14 @@ static int xfeature_is_user(int xfeature_nr)
+  */
+ void fpstate_sanitize_xstate(struct fpu *fpu)
+ {
+-      struct fxregs_state *fx = &fpu->state.fxsave;
++      struct fxregs_state *fx = &fpu->state->fxsave;
+       int feature_bit;
+       u64 xfeatures;
+       if (!use_xsaveopt())
+               return;
+-      xfeatures = fpu->state.xsave.header.xfeatures;
++      xfeatures = fpu->state->xsave.header.xfeatures;
+       /*
+        * None of the feature bits are in init state. So nothing else
+@@ -863,7 +863,7 @@ const void *get_xsave_field_ptr(int xsave_state)
+        */
+       fpu__save(fpu);
+-      return get_xsave_addr(&fpu->state.xsave, xsave_state);
++      return get_xsave_addr(&fpu->state->xsave, xsave_state);
+ }
+ #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index d036cfb..cb4c991 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
+        * kernel identity mapping to modify code.
+        */
+       if (within(ip, (unsigned long)_text, (unsigned long)_etext))
+-              ip = (unsigned long)__va(__pa_symbol(ip));
++              ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
+       return ip;
+ }
+@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
+ {
+       unsigned char replaced[MCOUNT_INSN_SIZE];
++      ip = ktla_ktva(ip);
++
+       ftrace_expected = old_code;
+       /*
+@@ -233,7 +235,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
+       unsigned char old[MCOUNT_INSN_SIZE];
+       int ret;
+-      memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
++      memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
+       ftrace_update_func = ip;
+       /* Make sure the breakpoints see the ftrace_update_func update */
+@@ -314,7 +316,7 @@ static int add_break(unsigned long ip, const char *old)
+       unsigned char replaced[MCOUNT_INSN_SIZE];
+       unsigned char brk = BREAKPOINT_INSTRUCTION;
+-      if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
++      if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
+               return -EFAULT;
+       ftrace_expected = old;
+@@ -681,11 +683,11 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+ /* Module allocation simplifies allocating memory for code */
+ static inline void *alloc_tramp(unsigned long size)
+ {
+-      return module_alloc(size);
++      return module_alloc_exec(size);
+ }
+ static inline void tramp_free(void *tramp)
+ {
+-      module_memfree(tramp);
++      module_memfree_exec(tramp);
+ }
+ #else
+ /* Trampolines can only be created if modules are supported */
+@@ -763,7 +765,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+       *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
+       /* Copy ftrace_caller onto the trampoline memory */
++      pax_open_kernel();
+       ret = probe_kernel_read(trampoline, (void *)start_offset, size);
++      pax_close_kernel();
+       if (WARN_ON(ret < 0)) {
+               tramp_free(trampoline);
+               return 0;
+@@ -773,6 +777,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+       /* The trampoline ends with a jmp to ftrace_epilogue */
+       jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
++      pax_open_kernel();
+       memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
+       /*
+@@ -785,6 +790,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+       ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
+       *ptr = (unsigned long)ops;
++      pax_close_kernel();
+       op_offset -= start_offset;
+       memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
+@@ -802,7 +808,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+       op_ptr.offset = offset;
+       /* put in the new offset to the ftrace_ops */
++      pax_open_kernel();
+       memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
++      pax_close_kernel();
+       /* ALLOC_TRAMP flags lets us know we created it */
+       ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 54a2372..46504a4 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -62,12 +62,12 @@ again:
+       pgd = *pgd_p;
+       /*
+-       * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
+-       * critical -- __PAGE_OFFSET would point us back into the dynamic
++       * The use of __early_va rather than __va here is critical:
++       * __va would point us back into the dynamic
+        * range and we might end up looping forever...
+        */
+       if (pgd)
+-              pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
++              pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
+       else {
+               if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
+                       reset_early_page_tables();
+@@ -76,13 +76,13 @@ again:
+               pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
+               memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+-              *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
++              *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
+       }
+       pud_p += pud_index(address);
+       pud = *pud_p;
+       if (pud)
+-              pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
++              pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
+       else {
+               if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
+                       reset_early_page_tables();
+@@ -91,7 +91,7 @@ again:
+               pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
+               memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
+-              *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
++              *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
+       }
+       pmd = (physaddr & PMD_MASK) + early_pmd_flags;
+       pmd_p[pmd_index(address)] = pmd;
+@@ -155,8 +155,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+       clear_bss();
+-      clear_page(init_level4_pgt);
+-
+       kasan_early_init();
+       for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 6f8902b..5d42150 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -27,6 +27,12 @@
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+  * References to members of the new_cpu_data structure.
+  */
+@@ -56,11 +62,7 @@
+  * and small than max_low_pfn, otherwise will waste some page table entries
+  */
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+ /*
+  * Number of possible pages in the lowmem region.
+@@ -86,6 +88,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+  * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
+  * %esi points to the real-mode code as a 32-bit pointer.
+  * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -93,6 +101,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+  * can.
+  */
+ __HEAD
++
++#ifdef CONFIG_PAX_KERNEXEC
++      jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+       movl pa(stack_start),%ecx
+       
+@@ -114,6 +129,66 @@ ENTRY(startup_32)
+ 2:
+       leal -__PAGE_OFFSET(%ecx),%esp
++#ifdef CONFIG_SMP
++      movl $pa(cpu_gdt_table),%edi
++      movl $__per_cpu_load,%eax
++      movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
++      rorl $16,%eax
++      movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
++      movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
++      movl $__per_cpu_end - 1,%eax
++      subl $__per_cpu_start,%eax
++      cmpl $0x100000,%eax
++      jb 1f
++      shrl $PAGE_SHIFT,%eax
++      orb $0x80,GDT_ENTRY_PERCPU * 8 + 6(%edi)
++1:
++      movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
++      shrl $16,%eax
++      orb %al,GDT_ENTRY_PERCPU * 8 + 6(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      movl $NR_CPUS,%ecx
++      movl $pa(cpu_gdt_table),%edi
++1:
++      movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++      movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++      movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
++      addl $PAGE_SIZE_asm,%edi
++      loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++      movl $pa(boot_gdt),%edi
++      movl $__LOAD_PHYSICAL_ADDR,%eax
++      movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
++      rorl $16,%eax
++      movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
++      movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
++      rorl $16,%eax
++
++      ljmp $(__BOOT_CS),$1f
++1:
++
++      movl $NR_CPUS,%ecx
++      movl $pa(cpu_gdt_table),%edi
++      addl $__PAGE_OFFSET,%eax
++1:
++      movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
++      movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
++      movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
++      movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
++      rorl $16,%eax
++      movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
++      movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
++      movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
++      movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
++      rorl $16,%eax
++      addl $PAGE_SIZE_asm,%edi
++      loop 1b
++#endif
++
+ /*
+  * Clear BSS first so that there are no surprises...
+  */
+@@ -209,8 +284,11 @@ ENTRY(startup_32)
+       movl %eax, pa(max_pfn_mapped)
+       /* Do early initialization of the fixmap area */
+-      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+-      movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#else
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -240,8 +318,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+       movl %eax, pa(max_pfn_mapped)
+       /* Do early initialization of the fixmap area */
+-      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+-      movl %eax,pa(initial_page_table+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
++#else
++      movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
++#endif
+ #endif
+ #ifdef CONFIG_PARAVIRT
+@@ -255,9 +336,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+       cmpl $num_subarch_entries, %eax
+       jae bad_subarch
+-      movl pa(subarch_entries)(,%eax,4), %eax
+-      subl $__PAGE_OFFSET, %eax
+-      jmp *%eax
++      jmp *pa(subarch_entries)(,%eax,4)
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -269,10 +348,10 @@ WEAK(xen_entry)
+       __INITDATA
+ subarch_entries:
+-      .long default_entry             /* normal x86/PC */
+-      .long lguest_entry              /* lguest hypervisor */
+-      .long xen_entry                 /* Xen hypervisor */
+-      .long default_entry             /* Moorestown MID */
++      .long ta(default_entry)         /* normal x86/PC */
++      .long ta(lguest_entry)          /* lguest hypervisor */
++      .long ta(xen_entry)             /* Xen hypervisor */
++      .long ta(default_entry)         /* Moorestown MID */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #else
+@@ -361,6 +440,7 @@ default_entry:
+       movl pa(mmu_cr4_features),%eax
+       movl %eax,%cr4
++#ifdef CONFIG_X86_PAE
+       testb $X86_CR4_PAE, %al         # check if PAE is enabled
+       jz enable_paging
+@@ -389,6 +469,9 @@ default_entry:
+       /* Make changes effective */
+       wrmsr
++      btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++#endif
++
+ enable_paging:
+ /*
+@@ -456,14 +539,20 @@ is486:
+ 1:    movl $(__KERNEL_DS),%eax        # reload all the segment registers
+       movl %eax,%ss                   # after changing gdt.
+-      movl $(__USER_DS),%eax          # DS/ES contains default USER segment
++#     movl $(__KERNEL_DS),%eax        # DS/ES contains default KERNEL segment
+       movl %eax,%ds
+       movl %eax,%es
+       movl $(__KERNEL_PERCPU), %eax
+       movl %eax,%fs                   # set this cpu's percpu
++#ifdef CONFIG_CC_STACKPROTECTOR
+       movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++      movl $(__USER_DS),%eax
++#else
++      xorl %eax,%eax
++#endif
+       movl %eax,%gs
+       xorl %eax,%eax                  # Clear LDT
+@@ -520,8 +609,11 @@ setup_once:
+        * relocation.  Manually set base address in stack canary
+        * segment descriptor.
+        */
+-      movl $gdt_page,%eax
++      movl $cpu_gdt_table,%eax
+       movl $stack_canary,%ecx
++#ifdef CONFIG_SMP
++      addl $__per_cpu_load,%ecx
++#endif
+       movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+       shrl $16, %ecx
+       movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+@@ -608,8 +700,11 @@ ENDPROC(early_idt_handler_common)
+ /* This is the default interrupt "handler" :-) */
+       ALIGN
+ ignore_int:
+-      cld
+ #ifdef CONFIG_PRINTK
++      cmpl $2,%ss:early_recursion_flag
++      je hlt_loop
++      incl %ss:early_recursion_flag
++      cld
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+@@ -618,9 +713,6 @@ ignore_int:
+       movl $(__KERNEL_DS),%eax
+       movl %eax,%ds
+       movl %eax,%es
+-      cmpl $2,early_recursion_flag
+-      je hlt_loop
+-      incl early_recursion_flag
+       pushl 16(%esp)
+       pushl 24(%esp)
+       pushl 32(%esp)
+@@ -655,11 +747,8 @@ ENTRY(initial_code)
+ ENTRY(setup_once_ref)
+       .long setup_once
+-/*
+- * BSS section
+- */
+-__PAGE_ALIGNED_BSS
+-      .align PAGE_SIZE
++__READ_ONLY
++      .balign PAGE_SIZE
+ #ifdef CONFIG_X86_PAE
+ initial_pg_pmd:
+       .fill 1024*KPMDS,4,0
+@@ -672,15 +761,18 @@ initial_pg_fixmap:
+ ENTRY(empty_zero_page)
+       .fill 4096,1,0
+ ENTRY(swapper_pg_dir)
+-      .fill 1024,4,0
++#ifdef CONFIG_X86_PAE
++      .fill PTRS_PER_PGD,8,0
++#else
++      .fill PTRS_PER_PGD,4,0
++#endif
+ /*
+  * This starts the data section.
+  */
+ #ifdef CONFIG_X86_PAE
+-__PAGE_ALIGNED_DATA
+-      /* Page-aligned for the benefit of paravirt? */
+-      .align PAGE_SIZE
++__READ_ONLY
++      .balign PAGE_SIZE
+ ENTRY(initial_page_table)
+       .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
+ # if KPMDS == 3
+@@ -698,13 +790,21 @@ ENTRY(initial_page_table)
+ # else
+ #  error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+-      .align PAGE_SIZE                /* needs to be page-sized too */
++      .balign PAGE_SIZE               /* needs to be page-sized too */
++
++# ifdef CONFIG_PAX_PER_CPU_PGD
++ENTRY(cpu_pgd)
++      .rept 2*NR_CPUS
++      .fill   PTRS_PER_PGD,8,0
++      .endr
++# endif
++
+ #endif
+ .data
+ .balign 4
+ ENTRY(stack_start)
+-      .long init_thread_union+THREAD_SIZE
++      .long init_thread_union+THREAD_SIZE-8
+ __INITRODATA
+ int_msg:
+@@ -719,7 +819,7 @@ int_msg:
+  * segment size, and 32-bit linear address value:
+  */
+-      .data
++__READ_ONLY
+ .globl boot_gdt_descr
+ .globl idt_descr
+@@ -728,7 +828,7 @@ int_msg:
+       .word 0                         # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+       .word __BOOT_DS+7
+-      .long boot_gdt - __PAGE_OFFSET
++      .long pa(boot_gdt)
+       .word 0                         # 32-bit align idt_desc.address
+ idt_descr:
+@@ -739,7 +839,7 @@ idt_descr:
+       .word 0                         # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+       .word GDT_ENTRIES*8-1
+-      .long gdt_page                  /* Overwritten for secondary CPUs */
++      .long cpu_gdt_table             /* Overwritten for secondary CPUs */
+ /*
+  * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -748,5 +848,65 @@ ENTRY(early_gdt_descr)
+       .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+       .fill GDT_ENTRY_BOOT_CS,8,0
+-      .quad 0x00cf9a000000ffff        /* kernel 4GB code at 0x00000000 */
+-      .quad 0x00cf92000000ffff        /* kernel 4GB data at 0x00000000 */
++      .quad 0x00cf9b000000ffff        /* kernel 4GB code at 0x00000000 */
++      .quad 0x00cf93000000ffff        /* kernel 4GB data at 0x00000000 */
++
++      .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++      .rept NR_CPUS
++      .quad 0x0000000000000000        /* NULL descriptor */
++      .quad 0x0000000000000000        /* 0x0b reserved */
++      .quad 0x0000000000000000        /* 0x13 reserved */
++      .quad 0x0000000000000000        /* 0x1b reserved */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      .quad 0x00cf9b000000ffff        /* 0x20 alternate kernel 4GB code at 0x00000000 */
++#else
++      .quad 0x0000000000000000        /* 0x20 unused */
++#endif
++
++      .quad 0x0000000000000000        /* 0x28 unused */
++      .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
++      .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
++      .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
++      .quad 0x0000000000000000        /* 0x4b reserved */
++      .quad 0x0000000000000000        /* 0x53 reserved */
++      .quad 0x0000000000000000        /* 0x5b reserved */
++
++      .quad 0x00cf9b000000ffff        /* 0x60 kernel 4GB code at 0x00000000 */
++      .quad 0x00cf93000000ffff        /* 0x68 kernel 4GB data at 0x00000000 */
++      .quad 0x00cffb000000ffff        /* 0x73 user 4GB code at 0x00000000 */
++      .quad 0x00cff3000000ffff        /* 0x7b user 4GB data at 0x00000000 */
++
++      .quad 0x0000000000000000        /* 0x80 TSS descriptor */
++      .quad 0x0000000000000000        /* 0x88 LDT descriptor */
++
++      /*
++       * Segments used for calling PnP BIOS have byte granularity.
++       * The code segments and data segments have fixed 64k limits,
++       * the transfer segment sizes are set at run time.
++       */
++      .quad 0x00409b000000ffff        /* 0x90 32-bit code */
++      .quad 0x00009b000000ffff        /* 0x98 16-bit code */
++      .quad 0x000093000000ffff        /* 0xa0 16-bit data */
++      .quad 0x0000930000000000        /* 0xa8 16-bit data */
++      .quad 0x0000930000000000        /* 0xb0 16-bit data */
++
++      /*
++       * The APM segments have byte granularity and their bases
++       * are set at run time.  All have 64k limits.
++       */
++      .quad 0x00409b000000ffff        /* 0xb8 APM CS    code */
++      .quad 0x00009b000000ffff        /* 0xc0 APM CS 16 code (16 bit) */
++      .quad 0x004093000000ffff        /* 0xc8 APM DS    data */
++
++      .quad 0x00c093000000ffff        /* 0xd0 - ESPFIX SS */
++      .quad 0x0040930000000000        /* 0xd8 - PERCPU */
++      .quad 0x0040910000000017        /* 0xe0 - STACK_CANARY */
++      .quad 0x0000000000000000        /* 0xe8 - PCIBIOS_CS */
++      .quad 0x0000000000000000        /* 0xf0 - PCIBIOS_DS */
++      .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
++
++      /* Be sure this is zeroed to avoid false validations in Xen */
++      .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++      .endr
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index 9f8efc9..e1942f9 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -20,6 +20,8 @@
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
+ #include <asm/nops.h>
++#include <asm/cpufeatures.h>
++#include <asm/alternative-asm.h>
+ #include "../entry/calling.h"
+ #ifdef CONFIG_PARAVIRT
+@@ -41,6 +43,12 @@
+ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMALLOC_END = pgd_index(VMALLOC_END)
++L3_VMALLOC_END = pud_index(VMALLOC_END)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+       .text
+       __HEAD
+@@ -98,11 +106,36 @@ startup_64:
+        * Fixup the physical addresses in the page table
+        */
+       addq    %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8) + 8(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8) + 16(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8) + 24(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
++      addq    %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+-      addq    %rbp, level3_kernel_pgt + (510*8)(%rip)
+-      addq    %rbp, level3_kernel_pgt + (511*8)(%rip)
++      addq    %rbp, level3_ident_pgt + (0*8)(%rip)
++#ifndef CONFIG_XEN
++      addq    %rbp, level3_ident_pgt + (1*8)(%rip)
++#endif
++      addq    %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
++
++      addq    %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++      addq    %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
++
++      addq    %rbp, level2_ident_pgt + (0*8)(%rip)
++
++      addq    %rbp, level2_fixmap_pgt + (0*8)(%rip)
++      addq    %rbp, level2_fixmap_pgt + (1*8)(%rip)
++      addq    %rbp, level2_fixmap_pgt + (2*8)(%rip)
++      addq    %rbp, level2_fixmap_pgt + (3*8)(%rip)
++
++      addq    %rbp, level2_fixmap_pgt + (504*8)(%rip)
++      addq    %rbp, level2_fixmap_pgt + (505*8)(%rip)
+       addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)
++      addq    %rbp, level2_fixmap_pgt + (507*8)(%rip)
+       /*
+        * Set up the identity mapping for the switchover.  These
+@@ -186,11 +219,12 @@ ENTRY(secondary_startup_64)
+       /* Sanitize CPU configuration */
+       call verify_cpu
++      orq     $-1, %rbp
+       movq    $(init_level4_pgt - __START_KERNEL_map), %rax
+ 1:
+-      /* Enable PAE mode and PGE */
+-      movl    $(X86_CR4_PAE | X86_CR4_PGE), %ecx
++      /* Enable PAE mode and PSE/PGE */
++      movl    $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
+       movq    %rcx, %cr4
+       /* Setup early boot stage 4 level pagetables. */
+@@ -211,10 +245,24 @@ ENTRY(secondary_startup_64)
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btsl    $_EFER_SCE, %eax        /* Enable System Call */
+-      btl     $20,%edi                /* No Execute supported? */
++      btl     $(X86_FEATURE_NX & 31),%edi     /* No Execute supported? */
+       jnc     1f
+       btsl    $_EFER_NX, %eax
++      cmpq    $-1, %rbp
++      je      1f
+       btsq    $_PAGE_BIT_NX,early_pmd_flags(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START)(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 8(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 16(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 24(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
++      btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
++      btsq    $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
++      btsq    $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
++      btsq    $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
++      btsq    $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
++      btsq    $_PAGE_BIT_NX, __supported_pte_mask(%rip)
+ 1:    wrmsr                           /* Make changes effective */
+       /* Setup cr0 */
+@@ -294,6 +342,7 @@ ENTRY(secondary_startup_64)
+        *      REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
+        *              address given in m16:64.
+        */
++      pax_set_fptr_mask
+       movq    initial_code(%rip),%rax
+       pushq   $0              # fake return address to stop unwinder
+       pushq   $__KERNEL_CS    # set correct cs
+@@ -328,7 +377,7 @@ ENDPROC(start_cpu0)
+       .quad   INIT_PER_CPU_VAR(irq_stack_union)
+       GLOBAL(stack_start)
+-      .quad  init_thread_union+THREAD_SIZE-8
++      .quad  init_thread_union+THREAD_SIZE-16
+       .word  0
+       __FINITDATA
+@@ -417,40 +466,70 @@ GLOBAL(name)
+       __INITDATA
+ NEXT_PAGE(early_level4_pgt)
+       .fill   511,8,0
+-      .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
++      .quad   level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ NEXT_PAGE(early_dynamic_pgts)
+       .fill   512*EARLY_DYNAMIC_PAGE_TABLES,8,0
+-      .data
++      __READ_ONLY
+-#ifndef CONFIG_XEN
+ NEXT_PAGE(init_level4_pgt)
+-      .fill   512,8,0
+-#else
+-NEXT_PAGE(init_level4_pgt)
+-      .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
+       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      .org    init_level4_pgt + L4_VMALLOC_START*8, 0
++      .quad   level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*0 + _KERNPG_TABLE
++      .quad   level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*1 + _KERNPG_TABLE
++      .quad   level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*2 + _KERNPG_TABLE
++      .quad   level3_vmalloc_start_pgt - __START_KERNEL_map + PAGE_SIZE*3 + _KERNPG_TABLE
++      .org    init_level4_pgt + L4_VMALLOC_END*8, 0
++      .quad   level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      .org    init_level4_pgt + L4_VMEMMAP_START*8, 0
++      .quad   level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_START_KERNEL*8, 0
+       /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+-      .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
++      .quad   level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++NEXT_PAGE(cpu_pgd)
++      .rept 2*NR_CPUS
++      .fill   512,8,0
++      .endr
++#endif
+ NEXT_PAGE(level3_ident_pgt)
+       .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+       .fill   511, 8, 0
++#else
++      .quad   level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++      .fill   510,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_start_pgt)
++      .fill   4*512,8,0
++
++NEXT_PAGE(level3_vmalloc_end_pgt)
++      .fill   512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++      .fill   L3_VMEMMAP_START,8,0
++      .quad   level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
++
+ NEXT_PAGE(level2_ident_pgt)
+-      /* Since I easily can, map the first 1G.
++      .quad   level1_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      /* Since I easily can, map the first 2G.
+        * Don't set NX because code runs from these pages.
+        */
+-      PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+-#endif
++      PMDS(PMD_SIZE, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD - 1)
+ NEXT_PAGE(level3_kernel_pgt)
+       .fill   L3_START_KERNEL,8,0
+       /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
+       .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+-      .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
++      .quad   level2_fixmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
++
++NEXT_PAGE(level2_vmemmap_pgt)
++      .fill   512,8,0
+ NEXT_PAGE(level2_kernel_pgt)
+       /*
+@@ -467,31 +546,79 @@ NEXT_PAGE(level2_kernel_pgt)
+               KERNEL_IMAGE_SIZE/PMD_SIZE)
+ NEXT_PAGE(level2_fixmap_pgt)
+-      .fill   506,8,0
+-      .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+-      /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+-      .fill   5,8,0
++      .quad   level1_modules_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _KERNPG_TABLE
++      .quad   level1_modules_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _KERNPG_TABLE
++      .quad   level1_modules_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _KERNPG_TABLE
++      .quad   level1_modules_pgt - __START_KERNEL_map + 3 * PAGE_SIZE + _KERNPG_TABLE
++      .fill   500,8,0
++      .quad   level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _KERNPG_TABLE
++      .quad   level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _KERNPG_TABLE
++      .quad   level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _KERNPG_TABLE
++      .quad   level1_vsyscall_pgt - __START_KERNEL_map + _KERNPG_TABLE
++      /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++      .fill   4,8,0
++
++NEXT_PAGE(level1_ident_pgt)
++      .fill   512,8,0
++
++NEXT_PAGE(level1_modules_pgt)
++      .fill   4*512,8,0
+ NEXT_PAGE(level1_fixmap_pgt)
++      .fill   3*512,8,0
++
++NEXT_PAGE(level1_vsyscall_pgt)
+       .fill   512,8,0
+ #undef PMDS
+-      .data
++      .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++      .rept NR_CPUS
++      .quad   0x0000000000000000      /* NULL descriptor */
++      .quad   0x00cf9b000000ffff      /* __KERNEL32_CS */
++      .quad   0x00af9b000000ffff      /* __KERNEL_CS */
++      .quad   0x00cf93000000ffff      /* __KERNEL_DS */
++      .quad   0x00cffb000000ffff      /* __USER32_CS */
++      .quad   0x00cff3000000ffff      /* __USER_DS, __USER32_DS  */
++      .quad   0x00affb000000ffff      /* __USER_CS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++      .quad   0x00af9b000000ffff      /* __KERNEXEC_KERNEL_CS */
++#else
++      .quad   0x0                     /* unused */
++#endif
++
++      .quad   0,0                     /* TSS */
++      .quad   0,0                     /* LDT */
++      .quad   0,0,0                   /* three TLS descriptors */
++      .quad   0x0000f40000000000      /* node/CPU stored in limit */
++      /* asm/segment.h:GDT_ENTRIES must match this */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      .quad   0x00cf93000000ffff      /* __UDEREF_KERNEL_DS */
++#else
++      .quad   0x0                     /* unused */
++#endif
++
++      /* zero the remaining page */
++      .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++      .endr
++
+       .align 16
+       .globl early_gdt_descr
+ early_gdt_descr:
+       .word   GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+-      .quad   INIT_PER_CPU_VAR(gdt_page)
++      .quad   cpu_gdt_table
+ ENTRY(phys_base)
+       /* This must match the first entry in level2_kernel_pgt */
+       .quad   0x0000000000000000
+ #include "../../x86/xen/xen-head.S"
+-      
+-      __PAGE_ALIGNED_BSS
++
++      .section .rodata,"a",@progbits
+ NEXT_PAGE(empty_zero_page)
+       .skip PAGE_SIZE
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index c6dfd80..5df5ed1 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -136,7 +136,7 @@ int is_hpet_enabled(void)
+ }
+ EXPORT_SYMBOL_GPL(is_hpet_enabled);
+-static void _hpet_print_config(const char *function, int line)
++static void __nocapture(1) _hpet_print_config(const char *function, int line)
+ {
+       u32 i, timers, l, h;
+       printk(KERN_INFO "hpet: %s(%d):\n", function, line);
+diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
+index 1f9b878..895e3ed 100644
+--- a/arch/x86/kernel/i386_ksyms_32.c
++++ b/arch/x86/kernel/i386_ksyms_32.c
+@@ -21,8 +21,12 @@ extern void cmpxchg8b_emu(void);
+ EXPORT_SYMBOL(cmpxchg8b_emu);
+ #endif
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -45,3 +49,11 @@ EXPORT_SYMBOL(___preempt_schedule_notrace);
+ #endif
+ EXPORT_SYMBOL(__sw_hweight32);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index be22f5a..a04fa14 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
+ static void make_8259A_irq(unsigned int irq)
+ {
+       disable_irq_nosync(irq);
+-      io_apic_irqs &= ~(1<<irq);
++      io_apic_irqs &= ~(1UL<<irq);
+       irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+       enable_irq(irq);
+ }
+@@ -208,7 +208,7 @@ spurious_8259A_irq:
+                              "spurious 8259A interrupt: IRQ%d.\n", irq);
+                       spurious_irq_mask |= irqmask;
+               }
+-              atomic_inc(&irq_err_count);
++              atomic_inc_unchecked(&irq_err_count);
+               /*
+                * Theoretically we do not have to handle this IRQ,
+                * but in Linux this does not cause problems and is
+@@ -356,14 +356,16 @@ static void init_8259A(int auto_eoi)
+       /* (slave's support for AEOI in flat mode is to be investigated) */
+       outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
++      pax_open_kernel();
+       if (auto_eoi)
+               /*
+                * In AEOI mode we just have to mask the interrupt
+                * when acking.
+                */
+-              i8259A_chip.irq_mask_ack = disable_8259A_irq;
++              const_cast(i8259A_chip.irq_mask_ack) = disable_8259A_irq;
+       else
+-              i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
++              const_cast(i8259A_chip.irq_mask_ack) = mask_and_ack_8259A;
++      pax_close_kernel();
+       udelay(100);            /* wait for 8259A to initialize */
+diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
+index 50c89e8..e148d28 100644
+--- a/arch/x86/kernel/io_delay.c
++++ b/arch/x86/kernel/io_delay.c
+@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
+  * Quirk table for systems that misbehave (lock up, etc.) if port
+  * 0x80 is used:
+  */
+-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
++static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
+       {
+               .callback       = dmi_io_delay_0xed_port,
+               .ident          = "Compaq Presario V6000",
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 589b319..41d6575 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+               return -EINVAL;
+       if (turn_on && !capable(CAP_SYS_RAWIO))
+               return -EPERM;
++#ifdef CONFIG_GRKERNSEC_IO
++      if (turn_on && grsec_disable_privio) {
++              gr_handle_ioperm();
++              return -ENODEV;
++      }
++#endif
+       /*
+        * If it's the first ioperm() call in this thread's lifetime, set the
+@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+        * because the ->io_bitmap_max value must match the bitmap
+        * contents:
+        */
+-      tss = &per_cpu(cpu_tss, get_cpu());
++      tss = cpu_tss + get_cpu();
+       if (turn_on)
+               bitmap_clear(t->io_bitmap_ptr, from, num);
+@@ -110,6 +117,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+       if (level > old) {
+               if (!capable(CAP_SYS_RAWIO))
+                       return -EPERM;
++#ifdef CONFIG_GRKERNSEC_IO
++              if (grsec_disable_privio) {
++                      gr_handle_iopl();
++                      return -ENODEV;
++              }
++#endif
+       }
+       regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
+               (level << X86_EFLAGS_IOPL_BIT);
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 9f669fd..00354af 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -28,7 +28,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
+ DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+ EXPORT_PER_CPU_SYMBOL(irq_regs);
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+ /* Function pointer for generic interrupt vector handling */
+ void (*x86_platform_ipi_callback)(void) = NULL;
+@@ -146,9 +146,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+               seq_puts(p, "  Hypervisor callback interrupts\n");
+       }
+ #endif
+-      seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++      seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ #if defined(CONFIG_X86_IO_APIC)
+-      seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
++      seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
+ #endif
+ #ifdef CONFIG_HAVE_KVM
+       seq_printf(p, "%*s: ", prec, "PIN");
+@@ -200,7 +200,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+ u64 arch_irq_stat(void)
+ {
+-      u64 sum = atomic_read(&irq_err_count);
++      u64 sum = atomic_read_unchecked(&irq_err_count);
+       return sum;
+ }
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 1f38d9a..0eb6e6f 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -22,6 +22,8 @@
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
++extern void gr_handle_kernel_exploit(void);
++
+ int sysctl_panic_on_stackoverflow __read_mostly;
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+@@ -32,13 +34,14 @@ static int check_stack_overflow(void)
+       __asm__ __volatile__("andl %%esp,%0" :
+                            "=r" (sp) : "0" (THREAD_SIZE - 1));
+-      return sp < (sizeof(struct thread_info) + STACK_WARN);
++      return sp < STACK_WARN;
+ }
+ static void print_stack_overflow(void)
+ {
+       printk(KERN_WARNING "low stack detected by irq handler\n");
+       dump_stack();
++      gr_handle_kernel_exploit();
+       if (sysctl_panic_on_stackoverflow)
+               panic("low stack detected by irq handler - check messages\n");
+ }
+@@ -69,10 +72,9 @@ static inline void *current_stack(void)
+ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+ {
+-      struct irq_stack *curstk, *irqstk;
++      struct irq_stack *irqstk;
+       u32 *isp, *prev_esp, arg1;
+-      curstk = (struct irq_stack *) current_stack();
+       irqstk = __this_cpu_read(hardirq_stack);
+       /*
+@@ -81,15 +83,19 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+        * handler) we can't do that and just have to keep using the
+        * current stack (which is the irq stack already after all)
+        */
+-      if (unlikely(curstk == irqstk))
++      if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
+               return 0;
+-      isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
++      isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
+       /* Save the next esp at the bottom of the stack */
+       prev_esp = (u32 *)irqstk;
+       *prev_esp = current_stack_pointer();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(MAKE_MM_SEG(0));
++#endif
++
+       if (unlikely(overflow))
+               call_on_stack(print_stack_overflow, isp);
+@@ -100,6 +106,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+                    :  "0" (desc),   "1" (isp),
+                       "D" (desc->handle_irq)
+                    : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+       return 1;
+ }
+@@ -108,23 +119,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+  */
+ void irq_ctx_init(int cpu)
+ {
+-      struct irq_stack *irqstk;
+-
+       if (per_cpu(hardirq_stack, cpu))
+               return;
+-      irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
+-                                             THREADINFO_GFP,
+-                                             THREAD_SIZE_ORDER));
+-      per_cpu(hardirq_stack, cpu) = irqstk;
+-
+-      irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
+-                                             THREADINFO_GFP,
+-                                             THREAD_SIZE_ORDER));
+-      per_cpu(softirq_stack, cpu) = irqstk;
+-
+-      printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+-             cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
++      per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
++      per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
+ }
+ void do_softirq_own_stack(void)
+@@ -141,7 +140,16 @@ void do_softirq_own_stack(void)
+       prev_esp = (u32 *)irqstk;
+       *prev_esp = current_stack_pointer();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(MAKE_MM_SEG(0));
++#endif
++
+       call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ }
+ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 4a79037..0c1319e 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -19,6 +19,8 @@
+ #include <asm/idle.h>
+ #include <asm/apic.h>
++extern void gr_handle_kernel_exploit(void);
++
+ int sysctl_panic_on_stackoverflow;
+ /*
+@@ -45,9 +47,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+           regs->sp <= curbase + THREAD_SIZE)
+               return;
+-      irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) +
+-                      STACK_TOP_MARGIN;
+       irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr);
++      irq_stack_top = irq_stack_bottom - IRQ_STACK_SIZE + 64 + STACK_TOP_MARGIN;
+       if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
+               return;
+@@ -62,6 +63,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+               irq_stack_top, irq_stack_bottom,
+               estack_top, estack_bottom);
++      gr_handle_kernel_exploit();
++
+       if (sysctl_panic_on_stackoverflow)
+               panic("low stack detected by irq handler - check messages\n");
+ #endif
+diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
+index fc25f69..d31d60c 100644
+--- a/arch/x86/kernel/jump_label.c
++++ b/arch/x86/kernel/jump_label.c
+@@ -32,6 +32,8 @@ static void bug_at(unsigned char *ip, int line)
+        * Something went wrong. Crash the box, as something could be
+        * corrupting the kernel.
+        */
++      ip = (unsigned char *)ktla_ktva((unsigned long)ip);
++      pr_warning("Unexpected op at %pS [%p] %s:%d\n", ip, ip, __FILE__, line);
+       pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
+              ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
+       BUG();
+@@ -52,7 +54,7 @@ static void __jump_label_transform(struct jump_entry *entry,
+                        * Jump label is enabled for the first time.
+                        * So we expect a default_nop...
+                        */
+-                      if (unlikely(memcmp((void *)entry->code, default_nop, 5)
++                      if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
+                                    != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               } else {
+@@ -60,7 +62,7 @@ static void __jump_label_transform(struct jump_entry *entry,
+                        * ...otherwise expect an ideal_nop. Otherwise
+                        * something went horribly wrong.
+                        */
+-                      if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
++                      if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
+                                    != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               }
+@@ -76,13 +78,13 @@ static void __jump_label_transform(struct jump_entry *entry,
+                * are converting the default nop to the ideal nop.
+                */
+               if (init) {
+-                      if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
++                      if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               } else {
+                       code.jump = 0xe9;
+                       code.offset = entry->target -
+                               (entry->code + JUMP_LABEL_NOP_SIZE);
+-                      if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
++                      if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               }
+               memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index 04cde52..8b2900b 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
+               bp->attr.bp_addr = breakinfo[breakno].addr;
+               bp->attr.bp_len = breakinfo[breakno].len;
+               bp->attr.bp_type = breakinfo[breakno].type;
+-              info->address = breakinfo[breakno].addr;
++              if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
++                      info->address = ktla_ktva(breakinfo[breakno].addr);
++              else
++                      info->address = breakinfo[breakno].addr;
+               info->len = breakinfo[breakno].len;
+               info->type = breakinfo[breakno].type;
+               val = arch_install_hw_breakpoint(bp);
+@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+       case 'k':
+               /* clear the trace bit */
+               linux_regs->flags &= ~X86_EFLAGS_TF;
+-              atomic_set(&kgdb_cpu_doing_single_step, -1);
++              atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
+               /* set the trace bit if we're stepping */
+               if (remcomInBuffer[0] == 's') {
+                       linux_regs->flags |= X86_EFLAGS_TF;
+-                      atomic_set(&kgdb_cpu_doing_single_step,
++                      atomic_set_unchecked(&kgdb_cpu_doing_single_step,
+                                  raw_smp_processor_id());
+               }
+@@ -551,7 +554,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+       switch (cmd) {
+       case DIE_DEBUG:
+-              if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
++              if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+                       if (user_mode(regs))
+                               return single_step_cont(regs, args);
+                       break;
+@@ -754,11 +757,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+       char opc[BREAK_INSTR_SIZE];
+       bpt->type = BP_BREAKPOINT;
+-      err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
++      err = probe_kernel_read(bpt->saved_instr, (const void *)ktla_ktva(bpt->bpt_addr),
+                               BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+-      err = probe_kernel_write((char *)bpt->bpt_addr,
++      err = probe_kernel_write((void *)ktla_ktva(bpt->bpt_addr),
+                                arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+       if (!err)
+               return err;
+@@ -770,7 +773,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+               return -EBUSY;
+       text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+                 BREAK_INSTR_SIZE);
+-      err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++      err = probe_kernel_read(opc, (const void *)ktla_ktva(bpt->bpt_addr), BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+       if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+@@ -794,13 +797,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+       if (mutex_is_locked(&text_mutex))
+               goto knl_write;
+       text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+-      err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++      err = probe_kernel_read(opc, (const void *)ktla_ktva(bpt->bpt_addr), BREAK_INSTR_SIZE);
+       if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+               goto knl_write;
+       return err;
+ knl_write:
+-      return probe_kernel_write((char *)bpt->bpt_addr,
++      return probe_kernel_write((void *)ktla_ktva(bpt->bpt_addr),
+                                 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+ }
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 7847e5c..cec50fd 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -122,9 +122,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
+               s32 raddr;
+       } __packed *insn;
+-      insn = (struct __arch_relative_insn *)from;
++      insn = (struct __arch_relative_insn *)ktla_ktva((unsigned long)from);
++
++      pax_open_kernel();
+       insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+       insn->op = op;
++      pax_close_kernel();
+ }
+ /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+@@ -170,7 +173,7 @@ int can_boost(kprobe_opcode_t *opcodes)
+       kprobe_opcode_t opcode;
+       kprobe_opcode_t *orig_opcodes = opcodes;
+-      if (search_exception_tables((unsigned long)opcodes))
++      if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+               return 0;       /* Page fault may occur on this address. */
+ retry:
+@@ -262,12 +265,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
+        * Fortunately, we know that the original code is the ideal 5-byte
+        * long NOP.
+        */
+-      memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++      memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (faddr)
+               memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+       else
+               buf[0] = kp->opcode;
+-      return (unsigned long)buf;
++      return ktva_ktla((unsigned long)buf);
+ }
+ /*
+@@ -369,7 +372,9 @@ int __copy_instruction(u8 *dest, u8 *src)
+       /* Another subsystem puts a breakpoint, failed to recover */
+       if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               return 0;
++      pax_open_kernel();
+       memcpy(dest, insn.kaddr, length);
++      pax_close_kernel();
+ #ifdef CONFIG_X86_64
+       if (insn_rip_relative(&insn)) {
+@@ -396,7 +401,9 @@ int __copy_instruction(u8 *dest, u8 *src)
+                       return 0;
+               }
+               disp = (u8 *) dest + insn_offset_displacement(&insn);
++              pax_open_kernel();
+               *(s32 *) disp = (s32) newdisp;
++              pax_close_kernel();
+       }
+ #endif
+       return length;
+@@ -538,7 +545,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+                * nor set current_kprobe, because it doesn't use single
+                * stepping.
+                */
+-              regs->ip = (unsigned long)p->ainsn.insn;
++              regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+               preempt_enable_no_resched();
+               return;
+       }
+@@ -555,9 +562,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+       regs->flags &= ~X86_EFLAGS_IF;
+       /* single step inline if the instruction is an int3 */
+       if (p->opcode == BREAKPOINT_INSTRUCTION)
+-              regs->ip = (unsigned long)p->addr;
++              regs->ip = ktla_ktva((unsigned long)p->addr);
+       else
+-              regs->ip = (unsigned long)p->ainsn.insn;
++              regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+ NOKPROBE_SYMBOL(setup_singlestep);
+@@ -642,7 +649,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
+                               setup_singlestep(p, regs, kcb, 0);
+                       return 1;
+               }
+-      } else if (*addr != BREAKPOINT_INSTRUCTION) {
++      } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+               /*
+                * The breakpoint instruction was removed right
+                * after we hit it.  Another cpu has removed
+@@ -688,6 +695,9 @@ asm(
+       "       movq %rax, 152(%rsp)\n"
+       RESTORE_REGS_STRING
+       "       popfq\n"
++#ifdef KERNEXEC_PLUGIN
++      "       btsq $63,(%rsp)\n"
++#endif
+ #else
+       "       pushf\n"
+       SAVE_REGS_STRING
+@@ -829,7 +839,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
+                            struct kprobe_ctlblk *kcb)
+ {
+       unsigned long *tos = stack_addr(regs);
+-      unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++      unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+       unsigned long orig_ip = (unsigned long)p->addr;
+       kprobe_opcode_t *insn = p->ainsn.insn;
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 4425f59..34a112f 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -80,6 +80,7 @@ found:
+ /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
+ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+ {
++      pax_open_kernel();
+ #ifdef CONFIG_X86_64
+       *addr++ = 0x48;
+       *addr++ = 0xbf;
+@@ -87,6 +88,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+       *addr++ = 0xb8;
+ #endif
+       *(unsigned long *)addr = val;
++      pax_close_kernel();
+ }
+ asm (
+@@ -343,7 +345,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+        * Verify if the address gap is in 2GB range, because this uses
+        * a relative jump.
+        */
+-      rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++      rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+       if (abs(rel) > 0x7fffffff) {
+               __arch_remove_optimized_kprobe(op, 0);
+               return -ERANGE;
+@@ -360,16 +362,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+       op->optinsn.size = ret;
+       /* Copy arch-dep-instance from template */
+-      memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
++      pax_open_kernel();
++      memcpy(buf, (u8 *)ktla_ktva((unsigned long)&optprobe_template_entry), TMPL_END_IDX);
++      pax_close_kernel();
+       /* Set probe information */
+       synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+       /* Set probe function call */
+-      synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++      synthesize_relcall((u8 *)ktva_ktla((unsigned long)buf) + TMPL_CALL_IDX, optimized_callback);
+       /* Set returning jmp instruction at the tail of out-of-line buffer */
+-      synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
++      synthesize_reljump((u8 *)ktva_ktla((unsigned long)buf) + TMPL_END_IDX + op->optinsn.size,
+                          (u8 *)op->kp.addr + op->optinsn.size);
+       flush_icache_range((unsigned long) buf,
+@@ -394,7 +398,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
+               WARN_ON(kprobe_disabled(&op->kp));
+               /* Backup instructions which will be replaced by jump address */
+-              memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++              memcpy(op->optinsn.copied_insn, (u8 *)ktla_ktva((unsigned long)op->kp.addr) + INT3_SIZE,
+                      RELATIVE_ADDR_SIZE);
+               insn_buf[0] = RELATIVEJUMP_OPCODE;
+@@ -442,7 +446,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+               /* This kprobe is really able to run optimized path. */
+               op = container_of(p, struct optimized_kprobe, kp);
+               /* Detour through copied instructions */
+-              regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
++              regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
+               if (!reenter)
+                       reset_current_kprobe();
+               preempt_enable_no_resched();
+diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
+index c2bedae..25e7ab60 100644
+--- a/arch/x86/kernel/ksysfs.c
++++ b/arch/x86/kernel/ksysfs.c
+@@ -184,7 +184,7 @@ out:
+ static struct kobj_attribute type_attr = __ATTR_RO(type);
+-static struct bin_attribute data_attr = {
++static bin_attribute_no_const data_attr __read_only = {
+       .attr = {
+               .name = "data",
+               .mode = S_IRUGO,
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 1726c4c..feda8055 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -544,7 +544,7 @@ static uint32_t __init kvm_detect(void)
+       return kvm_cpuid_base();
+ }
+-const struct hypervisor_x86 x86_hyper_kvm __refconst = {
++const struct hypervisor_x86 x86_hyper_kvm = {
+       .name                   = "KVM",
+       .detect                 = kvm_detect,
+       .x2apic_available       = kvm_para_available,
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 3692249..d2966c7 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -29,7 +29,7 @@
+ #include <asm/x86_init.h>
+ #include <asm/reboot.h>
+-static int kvmclock = 1;
++static int kvmclock __read_only = 1;
+ static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+ static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+ static cycle_t kvm_sched_clock_offset;
+@@ -42,7 +42,7 @@ static int parse_no_kvmclock(char *arg)
+ early_param("no-kvmclock", parse_no_kvmclock);
+ /* The hypervisor will put information about time periodically here */
+-static struct pvclock_vsyscall_time_info *hv_clock;
++static struct pvclock_vsyscall_time_info hv_clock[NR_CPUS] __page_aligned_bss;
+ static struct pvclock_wall_clock wall_clock;
+ struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
+@@ -161,7 +161,7 @@ bool kvm_check_and_clear_guest_paused(void)
+       struct pvclock_vcpu_time_info *src;
+       int cpu = smp_processor_id();
+-      if (!hv_clock)
++      if (!kvmclock)
+               return ret;
+       src = &hv_clock[cpu].pvti;
+@@ -188,7 +188,7 @@ int kvm_register_clock(char *txt)
+       int low, high, ret;
+       struct pvclock_vcpu_time_info *src;
+-      if (!hv_clock)
++      if (!kvmclock)
+               return 0;
+       src = &hv_clock[cpu].pvti;
+@@ -248,7 +248,6 @@ static void kvm_shutdown(void)
+ void __init kvmclock_init(void)
+ {
+       struct pvclock_vcpu_time_info *vcpu_time;
+-      unsigned long mem;
+       int size, cpu;
+       u8 flags;
+@@ -266,15 +265,8 @@ void __init kvmclock_init(void)
+       printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+               msr_kvm_system_time, msr_kvm_wall_clock);
+-      mem = memblock_alloc(size, PAGE_SIZE);
+-      if (!mem)
+-              return;
+-      hv_clock = __va(mem);
+-      memset(hv_clock, 0, size);
+-
+       if (kvm_register_clock("primary cpu clock")) {
+-              hv_clock = NULL;
+-              memblock_free(mem, size);
++              kvmclock = 0;
+               return;
+       }
+@@ -315,7 +307,7 @@ int __init kvm_setup_vsyscall_timeinfo(void)
+       struct pvclock_vcpu_time_info *vcpu_time;
+       unsigned int size;
+-      if (!hv_clock)
++      if (!kvmclock)
+               return 0;
+       size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index 6707039..254f32c 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -11,6 +11,7 @@
+ #include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
++#include <linux/ratelimit.h>
+ #include <linux/smp.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+@@ -21,6 +22,14 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
++#ifdef CONFIG_GRKERNSEC
++int sysctl_modify_ldt __read_only = 0;
++#elif defined(CONFIG_DEFAULT_MODIFY_LDT_SYSCALL)
++int sysctl_modify_ldt __read_only = 1;
++#else
++int sysctl_modify_ldt __read_only = 0;
++#endif
++
+ /* context.lock is held for us, so we don't need any locking. */
+ static void flush_ldt(void *current_mm)
+ {
+@@ -109,6 +118,23 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
+       struct mm_struct *old_mm;
+       int retval = 0;
++      if (tsk == current) {
++              mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              mm->context.user_cs_base = 0UL;
++              mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++              cpumask_clear(&mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++      }
++
+       mutex_init(&mm->context.lock);
+       old_mm = current->mm;
+       if (!old_mm) {
+@@ -235,6 +261,14 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+               /* The user wants to clear the entry. */
+               memset(&ldt, 0, sizeof(ldt));
+       } else {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++                      error = -EINVAL;
++                      goto out;
++              }
++#endif
++
+               if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+                       error = -EINVAL;
+                       goto out;
+@@ -276,6 +310,15 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+ {
+       int ret = -ENOSYS;
++      if (!sysctl_modify_ldt) {
++              printk_ratelimited(KERN_INFO
++                      "Denied a call to modify_ldt() from %s[%d] (uid: %d)."
++                      " Adjust sysctl if this was not an exploit attempt.\n",
++                      current->comm, task_pid_nr(current),
++                      from_kuid_munged(current_user_ns(), current_uid()));
++              return ret;
++      }
++
+       switch (func) {
+       case 0:
+               ret = read_ldt(ptr, bytecount);
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index 469b23d..5449cfe 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -26,7 +26,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/debugreg.h>
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+       struct desc_ptr curidt;
+@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
+ }
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+       struct desc_ptr curgdt;
+@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
+       }
+       control_page = page_address(image->control_code_page);
+-      memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++      memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+       relocate_kernel_ptr = control_page;
+       page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
+index 61924222..0e4856e 100644
+--- a/arch/x86/kernel/mcount_64.S
++++ b/arch/x86/kernel/mcount_64.S
+@@ -7,7 +7,7 @@
+ #include <linux/linkage.h>
+ #include <asm/ptrace.h>
+ #include <asm/ftrace.h>
+-
++#include <asm/alternative-asm.h>
+       .code64
+       .section .entry.text, "ax"
+@@ -148,8 +148,9 @@
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ ENTRY(function_hook)
++      pax_force_retaddr
+       retq
+-END(function_hook)
++ENDPROC(function_hook)
+ ENTRY(ftrace_caller)
+       /* save_mcount_regs fills in first two parameters */
+@@ -183,9 +184,10 @@ GLOBAL(ftrace_graph_call)
+ #endif
+ /* This is weak to keep gas from relaxing the jumps */
+-WEAK(ftrace_stub)
++RAP_WEAK(ftrace_stub)
++      pax_force_retaddr
+       retq
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+ ENTRY(ftrace_regs_caller)
+       /* Save the current flags before any operations that can change them */
+@@ -256,7 +258,7 @@ GLOBAL(ftrace_regs_caller_end)
+       jmp ftrace_epilogue
+-END(ftrace_regs_caller)
++ENDPROC(ftrace_regs_caller)
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+@@ -275,6 +277,7 @@ fgraph_trace:
+ #endif
+ GLOBAL(ftrace_stub)
++      pax_force_retaddr
+       retq
+ trace:
+@@ -287,12 +290,13 @@ trace:
+        * ip and parent ip are used and the list function is called when
+        * function tracing is enabled.
+        */
++      pax_force_fptr ftrace_trace_function
+       call   *ftrace_trace_function
+       restore_mcount_regs
+       jmp fgraph_trace
+-END(function_hook)
++ENDPROC(function_hook)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+@@ -314,8 +318,9 @@ ENTRY(ftrace_graph_caller)
+       restore_mcount_regs
++      pax_force_retaddr
+       retq
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+ GLOBAL(return_to_handler)
+       subq  $24, %rsp
+@@ -331,5 +336,7 @@ GLOBAL(return_to_handler)
+       movq 8(%rsp), %rdx
+       movq (%rsp), %rax
+       addq $24, %rsp
++      pax_force_fptr %rdi
+       jmp *%rdi
++ENDPROC(return_to_handler)
+ #endif
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 477ae80..a280c67 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -76,17 +76,17 @@ static unsigned long int get_module_load_offset(void)
+ }
+ #endif
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+       void *p;
+-      if (PAGE_ALIGN(size) > MODULES_LEN)
++      if (!size || PAGE_ALIGN(size) > MODULES_LEN)
+               return NULL;
+       p = __vmalloc_node_range(size, MODULE_ALIGN,
+                                   MODULES_VADDR + get_module_load_offset(),
+-                                  MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+-                                  PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++                                  MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
++                                  prot, 0, NUMA_NO_NODE,
+                                   __builtin_return_address(0));
+       if (p && (kasan_module_alloc(p, size) < 0)) {
+               vfree(p);
+@@ -96,6 +96,51 @@ void *module_alloc(unsigned long size)
+       return p;
+ }
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      return __module_alloc(size, PAGE_KERNEL);
++#else
++      return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc_exec(unsigned long size)
++{
++      struct vm_struct *area;
++
++      if (size == 0)
++              return NULL;
++
++      area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++      return area ? area->addr : NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_memfree_exec(void *module_region)
++{
++      vunmap(module_region);
++}
++EXPORT_SYMBOL(module_memfree_exec);
++#else
++void module_memfree_exec(void *module_region)
++{
++      module_memfree(module_region);
++}
++EXPORT_SYMBOL(module_memfree_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++      return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#endif
++
+ #ifdef CONFIG_X86_32
+ int apply_relocate(Elf32_Shdr *sechdrs,
+                  const char *strtab,
+@@ -106,14 +151,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+       unsigned int i;
+       Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+       Elf32_Sym *sym;
+-      uint32_t *location;
++      uint32_t *plocation, location;
+       DEBUGP("Applying relocate section %u to %u\n",
+              relsec, sechdrs[relsec].sh_info);
+       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+               /* This is where to make the change */
+-              location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+-                      + rel[i].r_offset;
++              plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++              location = (uint32_t)plocation;
++              if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++                      plocation = (uint32_t *)ktla_ktva((unsigned long)plocation);
+               /* This is the symbol it is referring to.  Note that all
+                  undefined symbols have been resolved.  */
+               sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -122,11 +169,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+               switch (ELF32_R_TYPE(rel[i].r_info)) {
+               case R_386_32:
+                       /* We add the value into the location given */
+-                      *location += sym->st_value;
++                      pax_open_kernel();
++                      *plocation += sym->st_value;
++                      pax_close_kernel();
+                       break;
+               case R_386_PC32:
+                       /* Add the value, subtract its position */
+-                      *location += sym->st_value - (uint32_t)location;
++                      pax_open_kernel();
++                      *plocation += sym->st_value - location;
++                      pax_close_kernel();
+                       break;
+               default:
+                       pr_err("%s: Unknown relocation: %u\n",
+@@ -171,21 +222,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+               case R_X86_64_NONE:
+                       break;
+               case R_X86_64_64:
++                      pax_open_kernel();
+                       *(u64 *)loc = val;
++                      pax_close_kernel();
+                       break;
+               case R_X86_64_32:
++                      pax_open_kernel();
+                       *(u32 *)loc = val;
++                      pax_close_kernel();
+                       if (val != *(u32 *)loc)
+                               goto overflow;
+                       break;
+               case R_X86_64_32S:
++                      pax_open_kernel();
+                       *(s32 *)loc = val;
++                      pax_close_kernel();
+                       if ((s64)val != *(s32 *)loc)
+                               goto overflow;
+                       break;
+               case R_X86_64_PC32:
+                       val -= (u64)loc;
++                      pax_open_kernel();
+                       *(u32 *)loc = val;
++                      pax_close_kernel();
++
+ #if 0
+                       if ((s64)val != *(s32 *)loc)
+                               goto overflow;
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 7f3550a..e535783 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -39,6 +39,7 @@
+ #include <linux/notifier.h>
+ #include <linux/uaccess.h>
+ #include <linux/gfp.h>
++#include <linux/grsecurity.h>
+ #include <asm/cpufeature.h>
+ #include <asm/msr.h>
+@@ -83,6 +84,13 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
+       int err = 0;
+       ssize_t bytes = 0;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      if (reg != MSR_IA32_ENERGY_PERF_BIAS) {
++              gr_handle_msr_write();
++              return -EPERM;
++      }
++#endif
++
+       if (count % 8)
+               return -EINVAL; /* Invalid chunk size */
+@@ -130,6 +138,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
+                       err = -EBADF;
+                       break;
+               }
++#ifdef CONFIG_GRKERNSEC_KMEM
++              gr_handle_msr_write();
++              return -EPERM;
++#endif
+               if (copy_from_user(&regs, uregs, sizeof regs)) {
+                       err = -EFAULT;
+                       break;
+@@ -213,7 +225,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block __refdata msr_class_cpu_notifier = {
++static struct notifier_block msr_class_cpu_notifier = {
+       .notifier_call = msr_class_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index bfe4d6c..1c3f03c 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -101,16 +101,16 @@ fs_initcall(nmi_warning_debugfs);
+ static void nmi_max_handler(struct irq_work *w)
+ {
+-      struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
++      struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
+       int remainder_ns, decimal_msecs;
+-      u64 whole_msecs = ACCESS_ONCE(a->max_duration);
++      u64 whole_msecs = ACCESS_ONCE(n->max_duration);
+       remainder_ns = do_div(whole_msecs, (1000 * 1000));
+       decimal_msecs = remainder_ns / 1000;
+       printk_ratelimited(KERN_INFO
+               "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+-              a->handler, whole_msecs, decimal_msecs);
++              n->action->handler, whole_msecs, decimal_msecs);
+ }
+ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+@@ -137,11 +137,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+               delta = sched_clock() - delta;
+               trace_nmi_handler(a->handler, (int)delta, thishandled);
+-              if (delta < nmi_longest_ns || delta < a->max_duration)
++              if (delta < nmi_longest_ns || delta < a->work->max_duration)
+                       continue;
+-              a->max_duration = delta;
+-              irq_work_queue(&a->irq_work);
++              a->work->max_duration = delta;
++              irq_work_queue(&a->work->irq_work);
+       }
+       rcu_read_unlock();
+@@ -151,7 +151,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(nmi_handle);
+-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
++int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
+ {
+       struct nmi_desc *desc = nmi_to_desc(type);
+       unsigned long flags;
+@@ -159,7 +159,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
+       if (!action->handler)
+               return -EINVAL;
+-      init_irq_work(&action->irq_work, nmi_max_handler);
++      action->work->action = action;
++      init_irq_work(&action->work->irq_work, nmi_max_handler);
+       spin_lock_irqsave(&desc->lock, flags);
+@@ -177,9 +178,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
+        * event confuses some handlers (kdump uses this flag)
+        */
+       if (action->flags & NMI_FLAG_FIRST)
+-              list_add_rcu(&action->list, &desc->head);
++              pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
+       else
+-              list_add_tail_rcu(&action->list, &desc->head);
++              pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
+       
+       spin_unlock_irqrestore(&desc->lock, flags);
+       return 0;
+@@ -202,7 +203,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
+               if (!strcmp(n->name, name)) {
+                       WARN(in_nmi(),
+                               "Trying to free NMI (%s) from NMI context!\n", n->name);
+-                      list_del_rcu(&n->list);
++                      pax_list_del_rcu((struct list_head *)&n->list);
+                       break;
+               }
+       }
+@@ -503,6 +504,17 @@ static DEFINE_PER_CPU(int, update_debug_stack);
+ dotraplinkage notrace void
+ do_nmi(struct pt_regs *regs, long error_code)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (!user_mode(regs)) {
++              unsigned long cs = regs->cs & 0xFFFF;
++              unsigned long ip = ktva_ktla(regs->ip);
++
++              if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
++                      regs->ip = ip;
++      }
++#endif
++
+       if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+               this_cpu_write(nmi_state, NMI_LATCHED);
+               return;
+diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
+index 6d9582e..f746287 100644
+--- a/arch/x86/kernel/nmi_selftest.c
++++ b/arch/x86/kernel/nmi_selftest.c
+@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
+ {
+       /* trap all the unknown NMIs we may generate */
+       register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
+-                      __initdata);
++                      __initconst);
+ }
+ static void __init cleanup_nmi_testsuite(void)
+@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
+       unsigned long timeout;
+       if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+-                               NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
++                               NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
+               nmi_fail = FAILURE;
+               return;
+       }
+diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
+index 1939a02..7e81a8f 100644
+--- a/arch/x86/kernel/paravirt-spinlocks.c
++++ b/arch/x86/kernel/paravirt-spinlocks.c
+@@ -23,16 +23,32 @@ bool pv_is_native_spin_unlock(void)
+ }
+ #endif
+-struct pv_lock_ops pv_lock_ops = {
++#ifdef CONFIG_SMP
++#ifdef CONFIG_QUEUED_SPINLOCKS
++static void native_wait(u8 *ptr, u8 val)
++{
++}
++
++static void native_kick(int cpu)
++{
++}
++#else /* !CONFIG_QUEUED_SPINLOCKS */
++static void native_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
++{
++}
++#endif /* !CONFIG_QUEUED_SPINLOCKS */
++#endif /* SMP */
++
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ #ifdef CONFIG_QUEUED_SPINLOCKS
+       .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+       .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+-      .wait = paravirt_nop,
+-      .kick = paravirt_nop,
++      .wait = native_wait,
++      .kick = native_kick,
+ #else /* !CONFIG_QUEUED_SPINLOCKS */
+       .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
+-      .unlock_kick = paravirt_nop,
++      .unlock_kick = native_unlock_kick,
+ #endif /* !CONFIG_QUEUED_SPINLOCKS */
+ #endif /* SMP */
+ };
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 1acfd76..8a3a86d 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -65,6 +65,9 @@ u64 notrace _paravirt_ident_64(u64 x)
+ {
+       return x;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
++#endif
+ void __init default_banner(void)
+ {
+@@ -140,15 +143,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+       if (opfunc == NULL)
+               /* If there's no function, patch it with a ud2a (BUG) */
+-              ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+-      else if (opfunc == _paravirt_nop)
++              ret = paravirt_patch_insns(insnbuf, len, (const char *)ktva_ktla((unsigned long)ud2a), ud2a+sizeof(ud2a));
++      else if (opfunc == (void *)_paravirt_nop)
+               ret = 0;
+       /* identity functions just return their single argument */
+-      else if (opfunc == _paravirt_ident_32)
++      else if (opfunc == (void *)_paravirt_ident_32)
+               ret = paravirt_patch_ident_32(insnbuf, len);
+-      else if (opfunc == _paravirt_ident_64)
++      else if (opfunc == (void *)_paravirt_ident_64)
+               ret = paravirt_patch_ident_64(insnbuf, len);
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++      else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
++              ret = paravirt_patch_ident_64(insnbuf, len);
++#endif
+       else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+                type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
+@@ -171,7 +178,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+       if (insn_len > len || start == NULL)
+               insn_len = len;
+       else
+-              memcpy(insnbuf, start, insn_len);
++              memcpy(insnbuf, (const char *)ktla_ktva((unsigned long)start), insn_len);
+       return insn_len;
+ }
+@@ -293,7 +300,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+       return this_cpu_read(paravirt_lazy_mode);
+ }
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+       .name = "bare hardware",
+       .kernel_rpl = 0,
+       .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
+@@ -303,16 +310,16 @@ struct pv_info pv_info = {
+ #endif
+ };
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+       .patch = native_patch,
+ };
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+       .sched_clock = native_sched_clock,
+       .steal_clock = native_steal_clock,
+ };
+-__visible struct pv_irq_ops pv_irq_ops = {
++__visible struct pv_irq_ops pv_irq_ops __read_only = {
+       .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+       .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+       .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+@@ -324,7 +331,23 @@ __visible struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+-__visible struct pv_cpu_ops pv_cpu_ops = {
++static void native_alloc_ldt(struct desc_struct *ldt, unsigned entries)
++{
++}
++
++static void native_free_ldt(struct desc_struct *ldt, unsigned entries)
++{
++}
++
++static void native_start_context_switch(struct task_struct *prev)
++{
++}
++
++static void native_end_context_switch(struct task_struct *next)
++{
++}
++
++__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
+       .cpuid = native_cpuid,
+       .get_debugreg = native_get_debugreg,
+       .set_debugreg = native_set_debugreg,
+@@ -358,8 +381,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
+       .write_gdt_entry = native_write_gdt_entry,
+       .write_idt_entry = native_write_idt_entry,
+-      .alloc_ldt = paravirt_nop,
+-      .free_ldt = paravirt_nop,
++      .alloc_ldt = native_alloc_ldt,
++      .free_ldt = native_free_ldt,
+       .load_sp0 = native_load_sp0,
+@@ -372,8 +395,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
+       .set_iopl_mask = native_set_iopl_mask,
+       .io_delay = native_io_delay,
+-      .start_context_switch = paravirt_nop,
+-      .end_context_switch = paravirt_nop,
++      .start_context_switch = native_start_context_switch,
++      .end_context_switch = native_end_context_switch,
+ };
+ /* At this point, native_get/set_debugreg has real function entries */
+@@ -381,15 +404,64 @@ NOKPROBE_SYMBOL(native_get_debugreg);
+ NOKPROBE_SYMBOL(native_set_debugreg);
+ NOKPROBE_SYMBOL(native_load_idt);
+-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
++/* 64-bit pagetable entries */
++#define PTE_IDENT     PV_CALLEE_SAVE(_paravirt_ident_64)
++#else
+ /* 32-bit pagetable entries */
+ #define PTE_IDENT     __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
++#endif
+ #else
+ /* 64-bit pagetable entries */
+ #define PTE_IDENT     __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+-struct pv_mmu_ops pv_mmu_ops = {
++static void native_pgd_free(struct mm_struct *mm, pgd_t *pgd)
++{
++}
++
++static void native_alloc_pte(struct mm_struct *mm, unsigned long pfn)
++{
++}
++
++static void native_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
++{
++}
++
++static void native_alloc_pud(struct mm_struct *mm, unsigned long pfn)
++{
++}
++
++static void native_release_pte(unsigned long pfn)
++{
++}
++
++static void native_release_pmd(unsigned long pfn)
++{
++}
++
++static void native_release_pud(unsigned long pfn)
++{
++}
++
++static void native_pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++}
++
++static void native_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
++{
++}
++
++static void native_exit_mmap(struct mm_struct *mm)
++{
++}
++
++static void native_activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++}
++
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+       .read_cr2 = native_read_cr2,
+       .write_cr2 = native_write_cr2,
+@@ -402,20 +474,20 @@ struct pv_mmu_ops pv_mmu_ops = {
+       .flush_tlb_others = native_flush_tlb_others,
+       .pgd_alloc = __paravirt_pgd_alloc,
+-      .pgd_free = paravirt_nop,
++      .pgd_free = native_pgd_free,
+-      .alloc_pte = paravirt_nop,
+-      .alloc_pmd = paravirt_nop,
+-      .alloc_pud = paravirt_nop,
+-      .release_pte = paravirt_nop,
+-      .release_pmd = paravirt_nop,
+-      .release_pud = paravirt_nop,
++      .alloc_pte = native_alloc_pte,
++      .alloc_pmd = native_alloc_pmd,
++      .alloc_pud = native_alloc_pud,
++      .release_pte = native_release_pte,
++      .release_pmd = native_release_pmd,
++      .release_pud = native_release_pud,
+       .set_pte = native_set_pte,
+       .set_pte_at = native_set_pte_at,
+       .set_pmd = native_set_pmd,
+       .set_pmd_at = native_set_pmd_at,
+-      .pte_update = paravirt_nop,
++      .pte_update = native_pte_update,
+       .ptep_modify_prot_start = __ptep_modify_prot_start,
+       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
+@@ -436,6 +508,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+       .make_pud = PTE_IDENT,
+       .set_pgd = native_set_pgd,
++      .set_pgd_batched = native_set_pgd_batched,
+ #endif
+ #endif /* CONFIG_PGTABLE_LEVELS >= 3 */
+@@ -445,9 +518,9 @@ struct pv_mmu_ops pv_mmu_ops = {
+       .make_pte = PTE_IDENT,
+       .make_pgd = PTE_IDENT,
+-      .dup_mmap = paravirt_nop,
+-      .exit_mmap = paravirt_nop,
+-      .activate_mm = paravirt_nop,
++      .dup_mmap = native_dup_mmap,
++      .exit_mmap = native_exit_mmap,
++      .activate_mm = native_activate_mm,
+       .lazy_mode = {
+               .enter = paravirt_nop,
+@@ -456,6 +529,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+       },
+       .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++      .pax_open_kernel = native_pax_open_kernel,
++      .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
+index e70087a..b083377 100644
+--- a/arch/x86/kernel/paravirt_patch_64.c
++++ b/arch/x86/kernel/paravirt_patch_64.c
+@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
+ DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
++
++#ifndef CONFIG_PAX_MEMORY_UDEREF
+ DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
++#endif
++
+ DEF_NATIVE(pv_cpu_ops, clts, "clts");
+ DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
+@@ -59,7 +63,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+               PATCH_SITE(pv_mmu_ops, read_cr3);
+               PATCH_SITE(pv_mmu_ops, write_cr3);
+               PATCH_SITE(pv_cpu_ops, clts);
++
++#ifndef CONFIG_PAX_MEMORY_UDEREF
+               PATCH_SITE(pv_mmu_ops, flush_tlb_single);
++#endif
++
+               PATCH_SITE(pv_cpu_ops, wbinvd);
+ #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+               case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
+index 5d400ba..eaad6f6 100644
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
+                       tce_space = be64_to_cpu(readq(target));
+                       tce_space = tce_space & TAR_SW_BITS;
+-                      tce_space = tce_space & (~specified_table_size);
++                      tce_space = tce_space & (~(unsigned long)specified_table_size);
+                       info->tce_space = (u64 *)__va(tce_space);
+               }
+       }
+diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
+index f712dfd..0172a75 100644
+--- a/arch/x86/kernel/pci-iommu_table.c
++++ b/arch/x86/kernel/pci-iommu_table.c
+@@ -2,7 +2,7 @@
+ #include <asm/iommu_table.h>
+ #include <linux/string.h>
+ #include <linux/kallsyms.h>
+-
++#include <linux/sched.h>
+ #define DEBUG 1
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 62c0b0e..43bd8da 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -16,6 +16,7 @@
+ #include <linux/dmi.h>
+ #include <linux/utsname.h>
+ #include <linux/stackprotector.h>
++#include <linux/kthread.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <trace/events/power.h>
+@@ -40,7 +41,8 @@
+  * section. Since TSS's are completely CPU-local, we want them
+  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+  */
+-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
++struct tss_struct cpu_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = {
++      [0 ... NR_CPUS-1] = {
+       .x86_tss = {
+               .sp0 = TOP_OF_INIT_STACK,
+ #ifdef CONFIG_X86_32
+@@ -61,6 +63,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+ #ifdef CONFIG_X86_32
+       .SYSENTER_stack_canary  = STACK_END_MAGIC,
+ #endif
++}
+ };
+ EXPORT_PER_CPU_SYMBOL(cpu_tss);
+@@ -81,13 +84,26 @@ void idle_notifier_unregister(struct notifier_block *n)
+ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+ #endif
++struct kmem_cache *fpregs_state_cachep;
++EXPORT_SYMBOL(fpregs_state_cachep);
++
++void __init arch_task_cache_init(void)
++{
++      /* create a slab on which task_structs can be allocated */
++      fpregs_state_cachep =
++              kmem_cache_create_usercopy("fpregs_state", fpu_kernel_xstate_size,
++                      ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, 0, fpu_kernel_xstate_size, NULL);
++}
++
+ /*
+  * this gets called so that we can store lazy state into memory and copy the
+  * current task into the new thread.
+  */
+ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ {
+-      memcpy(dst, src, arch_task_struct_size);
++      *dst = *src;
++      dst->thread.fpu.state = kmem_cache_alloc_node(fpregs_state_cachep, GFP_KERNEL, tsk_fork_get_node(src));
++      memcpy(dst->thread.fpu.state, src->thread.fpu.state, fpu_kernel_xstate_size);
+ #ifdef CONFIG_VM86
+       dst->thread.vm86 = NULL;
+ #endif
+@@ -95,6 +111,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+       return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
+ }
++void arch_release_task_struct(struct task_struct *tsk)
++{
++      kmem_cache_free(fpregs_state_cachep, tsk->thread.fpu.state);
++      tsk->thread.fpu.state = NULL;
++}
++
+ /*
+  * Free current thread data structures etc..
+  */
+@@ -105,7 +127,7 @@ void exit_thread(struct task_struct *tsk)
+       struct fpu *fpu = &t->fpu;
+       if (bp) {
+-              struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
++              struct tss_struct *tss = cpu_tss + get_cpu();
+               t->io_bitmap_ptr = NULL;
+               clear_thread_flag(TIF_IO_BITMAP);
+@@ -127,6 +149,9 @@ void flush_thread(void)
+ {
+       struct task_struct *tsk = current;
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++      loadsegment(gs, 0);
++#endif
+       flush_ptrace_hw_breakpoint(tsk);
+       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+@@ -268,7 +293,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+       /* idle loop has pid 0 */
+-      if (current->pid)
++      if (task_pid_nr(current))
+               return;
+       __exit_idle();
+ }
+@@ -321,7 +346,7 @@ bool xen_set_default_idle(void)
+       return ret;
+ }
+ #endif
+-void stop_this_cpu(void *dummy)
++__noreturn void stop_this_cpu(void *dummy)
+ {
+       local_irq_disable();
+       /*
+@@ -499,13 +524,6 @@ static int __init idle_setup(char *str)
+ }
+ early_param("idle", idle_setup);
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+-      if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+-              sp -= get_random_int() % 8192;
+-      return sp & ~0xf;
+-}
+-
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+       unsigned long range_end = mm->brk + 0x02000000;
+@@ -537,9 +555,7 @@ unsigned long get_wchan(struct task_struct *p)
+        * PADDING
+        * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+        * stack
+-       * ----------- bottom = start + sizeof(thread_info)
+-       * thread_info
+-       * ----------- start
++       * ----------- bottom = start
+        *
+        * The tasks stack pointer points at the location where the
+        * framepointer is stored. The data on the stack is:
+@@ -550,7 +566,7 @@ unsigned long get_wchan(struct task_struct *p)
+        */
+       top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+       top -= 2 * sizeof(unsigned long);
+-      bottom = start + sizeof(struct thread_info);
++      bottom = start;
+       sp = READ_ONCE(p->thread.sp);
+       if (sp < bottom || sp > top)
+@@ -567,3 +583,35 @@ unsigned long get_wchan(struct task_struct *p)
+       } while (count++ < 16 && p->state != TASK_RUNNING);
+       return 0;
+ }
++
++#ifdef CONFIG_PAX_RANDKSTACK
++void pax_randomize_kstack(struct pt_regs *regs)
++{
++      struct thread_struct *thread = &current->thread;
++      unsigned long time;
++
++      if (!randomize_va_space)
++              return;
++
++      if (v8086_mode(regs))
++              return;
++
++      time = rdtsc();
++
++      /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++      time &= 0x3EUL;
++      time <<= 2;
++#elif defined(CONFIG_X86_64)
++      time &= 0xFUL;
++      time <<= 4;
++#else
++      time &= 0x1FUL;
++      time <<= 3;
++#endif
++
++      thread->sp0 ^= time;
++      load_sp0(cpu_tss + smp_processor_id(), thread);
++      this_cpu_write(cpu_current_top_of_stack, thread->sp0);
++}
++#endif
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index d86be29..eb6012e 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+       return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+ void __show_regs(struct pt_regs *regs, int all)
+@@ -76,16 +77,15 @@ void __show_regs(struct pt_regs *regs, int all)
+       if (user_mode(regs)) {
+               sp = regs->sp;
+               ss = regs->ss & 0xffff;
+-              gs = get_user_gs(regs);
+       } else {
+               sp = kernel_stack_pointer(regs);
+               savesegment(ss, ss);
+-              savesegment(gs, gs);
+       }
++      gs = get_user_gs(regs);
+       printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+                       (u16)regs->cs, regs->ip, regs->flags,
+-                      smp_processor_id());
++                      raw_smp_processor_id());
+       print_symbol("EIP is at %s\n", regs->ip);
+       printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
+ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+       unsigned long arg, struct task_struct *p, unsigned long tls)
+ {
+-      struct pt_regs *childregs = task_pt_regs(p);
++      struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+       struct task_struct *tsk;
+       int err;
+       p->thread.sp = (unsigned long) childregs;
+       p->thread.sp0 = (unsigned long) (childregs+1);
++      p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
+       memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+       if (unlikely(p->flags & PF_KTHREAD)) {
+               /* kernel thread */
+               memset(childregs, 0, sizeof(struct pt_regs));
+               p->thread.ip = (unsigned long) ret_from_kernel_thread;
+-              task_user_gs(p) = __KERNEL_STACK_CANARY;
+-              childregs->ds = __USER_DS;
+-              childregs->es = __USER_DS;
++              savesegment(gs, childregs->gs);
++              childregs->ds = __KERNEL_DS;
++              childregs->es = __KERNEL_DS;
+               childregs->fs = __KERNEL_PERCPU;
+               childregs->bx = sp;     /* function */
+               childregs->bp = arg;
+@@ -246,7 +247,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       struct fpu *prev_fpu = &prev->fpu;
+       struct fpu *next_fpu = &next->fpu;
+       int cpu = smp_processor_id();
+-      struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
++      struct tss_struct *tss = cpu_tss + cpu;
+       fpu_switch_t fpu_switch;
+       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+@@ -265,6 +266,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+        */
+       lazy_save_gs(prev->gs);
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(task_thread_info(next_p)->addr_limit);
++#endif
++
+       /*
+        * Load the per-thread Thread-Local Storage descriptor.
+        */
+@@ -300,9 +305,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+        * current_thread_info().
+        */
+       load_sp0(tss, next);
+-      this_cpu_write(cpu_current_top_of_stack,
+-                     (unsigned long)task_stack_page(next_p) +
+-                     THREAD_SIZE);
++      this_cpu_write(current_task, next_p);
++      this_cpu_write(current_tinfo, &next_p->tinfo);
++      this_cpu_write(cpu_current_top_of_stack, next->sp0);
+       /*
+        * Restore %gs if needed (which is common)
+@@ -312,7 +317,5 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       switch_fpu_finish(next_fpu, fpu_switch);
+-      this_cpu_write(current_task, next_p);
+-
+       return prev_p;
+ }
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index a21068e..3f3a2eb 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -144,9 +144,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+       struct pt_regs *childregs;
+       struct task_struct *me = current;
+-      p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
++      p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
+       childregs = task_pt_regs(p);
+       p->thread.sp = (unsigned long) childregs;
++      p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
+       set_tsk_thread_flag(p, TIF_FORK);
+       p->thread.io_bitmap_ptr = NULL;
+@@ -156,6 +157,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+       p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
+       savesegment(es, p->thread.es);
+       savesegment(ds, p->thread.ds);
++      savesegment(ss, p->thread.ss);
++      BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
+       memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+       if (unlikely(p->flags & PF_KTHREAD)) {
+@@ -263,7 +266,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       struct fpu *prev_fpu = &prev->fpu;
+       struct fpu *next_fpu = &next->fpu;
+       int cpu = smp_processor_id();
+-      struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
++      struct tss_struct *tss = cpu_tss + cpu;
+       unsigned prev_fsindex, prev_gsindex;
+       fpu_switch_t fpu_switch;
+@@ -314,6 +317,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+       if (unlikely(next->ds | prev->ds))
+               loadsegment(ds, next->ds);
++      savesegment(ss, prev->ss);
++      if (unlikely(next->ss != prev->ss))
++              loadsegment(ss, next->ss);
++
+       /*
+        * Switch FS and GS.
+        *
+@@ -423,10 +430,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+        * Switch the PDA and FPU contexts.
+        */
+       this_cpu_write(current_task, next_p);
++      this_cpu_write(current_tinfo, &next_p->tinfo);
+       /* Reload esp0 and ss1.  This changes current_thread_info(). */
+       load_sp0(tss, next);
++      this_cpu_write(cpu_current_top_of_stack, next->sp0);
++
+       /*
+        * Now maybe reload the debug registers and handle I/O bitmaps
+        */
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index a1606ea..3e7a408 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -169,7 +169,7 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
+       unsigned long sp = (unsigned long)&regs->sp;
+       u32 *prev_esp;
+-      if (context == (sp & ~(THREAD_SIZE - 1)))
++      if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
+               return sp;
+       prev_esp = (u32 *)(context);
+@@ -411,6 +411,20 @@ static int putreg(struct task_struct *child,
+               if (child->thread.gsbase != value)
+                       return do_arch_prctl(child, ARCH_SET_GS, value);
+               return 0;
++
++      case offsetof(struct user_regs_struct,ip):
++              /*
++               * Protect against any attempt to set ip to an
++               * impossible address.  There are dragons lurking if the
++               * address is noncanonical.  (This explicitly allows
++               * setting ip to TASK_SIZE_MAX, because user code can do
++               * that all by itself by running off the end of its
++               * address space.
++               */
++              if (value > TASK_SIZE_MAX)
++                      return -EIO;
++              break;
++
+ #endif
+       }
+@@ -533,7 +547,7 @@ static void ptrace_triggered(struct perf_event *bp,
+ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
+ {
+       int i;
+-      int dr7 = 0;
++      unsigned long dr7 = 0;
+       struct arch_hw_breakpoint *info;
+       for (i = 0; i < HBP_NUM; i++) {
+@@ -767,7 +781,7 @@ long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
+ {
+       int ret;
+-      unsigned long __user *datap = (unsigned long __user *)data;
++      unsigned long __user *datap = (__force unsigned long __user *)data;
+       switch (request) {
+       /* read the word at location addr in the USER area. */
+@@ -852,14 +866,14 @@ long arch_ptrace(struct task_struct *child, long request,
+               if ((int) addr < 0)
+                       return -EIO;
+               ret = do_get_thread_area(child, addr,
+-                                      (struct user_desc __user *)data);
++                                      (__force struct user_desc __user *) data);
+               break;
+       case PTRACE_SET_THREAD_AREA:
+               if ((int) addr < 0)
+                       return -EIO;
+               ret = do_set_thread_area(child, addr,
+-                                      (struct user_desc __user *)data, 0);
++                                      (__force struct user_desc __user *) data, 0);
+               break;
+ #endif
+@@ -1250,7 +1264,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ #ifdef CONFIG_X86_64
+-static struct user_regset x86_64_regsets[] __read_mostly = {
++static user_regset_no_const x86_64_regsets[] __read_only = {
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = sizeof(struct user_regs_struct) / sizeof(long),
+@@ -1291,7 +1305,7 @@ static const struct user_regset_view user_x86_64_view = {
+ #endif        /* CONFIG_X86_64 */
+ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+-static struct user_regset x86_32_regsets[] __read_mostly = {
++static user_regset_no_const x86_32_regsets[] __read_only = {
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = sizeof(struct user_regs_struct32) / sizeof(u32),
+@@ -1344,7 +1358,7 @@ static const struct user_regset_view user_x86_32_view = {
+  */
+ u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
++void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
+ {
+ #ifdef CONFIG_X86_64
+       x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
+@@ -1379,7 +1393,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+       memset(info, 0, sizeof(*info));
+       info->si_signo = SIGTRAP;
+       info->si_code = si_code;
+-      info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
++      info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
+ }
+ void user_single_step_siginfo(struct task_struct *tsk,
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 3599404..ebc784f 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
+       reset_hung_task_detector();
+ }
+-static atomic64_t last_value = ATOMIC64_INIT(0);
++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
+ void pvclock_resume(void)
+ {
+-      atomic64_set(&last_value, 0);
++      atomic64_set_unchecked(&last_value, 0);
+ }
+ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
+@@ -107,11 +107,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+        * updating at the same time, and one of them could be slightly behind,
+        * making the assumption that last_value always go forward fail to hold.
+        */
+-      last = atomic64_read(&last_value);
++      last = atomic64_read_unchecked(&last_value);
+       do {
+               if (ret < last)
+                       return last;
+-              last = atomic64_cmpxchg(&last_value, last, ret);
++              last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
+       } while (unlikely(last != ret));
+       return ret;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 63bf27d..a75d12b 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -83,6 +83,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
+ void __noreturn machine_real_restart(unsigned int type)
+ {
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++      struct desc_struct *gdt;
++#endif
++
+       local_irq_disable();
+       /*
+@@ -110,7 +115,29 @@ void __noreturn machine_real_restart(unsigned int type)
+       /* Jump to the identity-mapped low memory code */
+ #ifdef CONFIG_X86_32
+-      asm volatile("jmpl *%0" : :
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++      gdt = get_cpu_gdt_table(smp_processor_id());
++      pax_open_kernel();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++      gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++      loadsegment(ds, __KERNEL_DS);
++      loadsegment(es, __KERNEL_DS);
++      loadsegment(ss, __KERNEL_DS);
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++      gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
++      gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
++      gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
++      gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
++      gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
++      gdt[GDT_ENTRY_KERNEL_CS].g = 1;
++#endif
++      pax_close_kernel();
++#endif
++
++      asm volatile("ljmpl *%0" : :
+                    "rm" (real_mode_header->machine_real_restart_asm),
+                    "a" (type));
+ #else
+@@ -150,7 +177,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
+ /*
+  * This is a single dmi_table handling all reboot quirks.
+  */
+-static struct dmi_system_id __initdata reboot_dmi_table[] = {
++static const struct dmi_system_id __initconst reboot_dmi_table[] = {
+       /* Acer */
+       {       /* Handle reboot issue on Acer Aspire one */
+@@ -540,7 +567,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
+  * This means that this function can never return, it can misbehave
+  * by not rebooting properly and hanging.
+  */
+-static void native_machine_emergency_restart(void)
++static void __noreturn native_machine_emergency_restart(void)
+ {
+       int i;
+       int attempt = 0;
+@@ -669,13 +696,13 @@ void native_machine_shutdown(void)
+ #endif
+ }
+-static void __machine_emergency_restart(int emergency)
++static void __noreturn __machine_emergency_restart(int emergency)
+ {
+       reboot_emergency = emergency;
+       machine_ops.emergency_restart();
+ }
+-static void native_machine_restart(char *__unused)
++static void __noreturn native_machine_restart(char *__unused)
+ {
+       pr_notice("machine restart\n");
+@@ -684,7 +711,7 @@ static void native_machine_restart(char *__unused)
+       __machine_emergency_restart(0);
+ }
+-static void native_machine_halt(void)
++static void __noreturn native_machine_halt(void)
+ {
+       /* Stop other cpus and apics */
+       machine_shutdown();
+@@ -694,7 +721,7 @@ static void native_machine_halt(void)
+       stop_this_cpu(NULL);
+ }
+-static void native_machine_power_off(void)
++static void __noreturn native_machine_power_off(void)
+ {
+       if (pm_power_off) {
+               if (!reboot_force)
+@@ -703,9 +730,10 @@ static void native_machine_power_off(void)
+       }
+       /* A fallback in case there is no PM info available */
+       tboot_shutdown(TB_SHUTDOWN_HALT);
++      unreachable();
+ }
+-struct machine_ops machine_ops = {
++struct machine_ops machine_ops __read_only = {
+       .power_off = native_machine_power_off,
+       .shutdown = native_machine_shutdown,
+       .emergency_restart = native_machine_emergency_restart,
+diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
+index c8e41e9..64049ef 100644
+--- a/arch/x86/kernel/reboot_fixups_32.c
++++ b/arch/x86/kernel/reboot_fixups_32.c
+@@ -57,7 +57,7 @@ struct device_fixup {
+       unsigned int vendor;
+       unsigned int device;
+       void (*reboot_fixup)(struct pci_dev *);
+-};
++} __do_const;
+ /*
+  * PCI ids solely used for fixups_table go here
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index 98111b3..73ca125 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -96,8 +96,7 @@ relocate_kernel:
+       /* jump to identity mapped page */
+       addq    $(identity_mapped - relocate_kernel), %r8
+-      pushq   %r8
+-      ret
++      jmp     *%r8
+ identity_mapped:
+       /* set return address to 0 if not preserving context */
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 98c9cd6..c32f54c 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -114,6 +114,7 @@
+ #include <asm/microcode.h>
+ #include <asm/mmu_context.h>
+ #include <asm/kaslr.h>
++#include <asm/boot.h>
+ /*
+  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
+@@ -178,7 +179,7 @@ struct cpuinfo_x86 new_cpu_data = {
+       .wp_works_ok = -1,
+ };
+ /* common cpu data for all cpus */
+-struct cpuinfo_x86 boot_cpu_data __read_mostly = {
++struct cpuinfo_x86 boot_cpu_data __read_only = {
+       .wp_works_ok = -1,
+ };
+ EXPORT_SYMBOL(boot_cpu_data);
+@@ -202,17 +203,19 @@ struct ist_info ist_info;
+ #endif
+ #else
+-struct cpuinfo_x86 boot_cpu_data __read_mostly = {
++struct cpuinfo_x86 boot_cpu_data __read_only = {
+       .x86_phys_bits = MAX_PHYSMEM_BITS,
+ };
+ EXPORT_SYMBOL(boot_cpu_data);
+ #endif
+-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+-__visible unsigned long mmu_cr4_features;
++#ifdef CONFIG_X86_64
++__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
++#elif defined(CONFIG_X86_PAE)
++__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
+ #else
+-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
++__visible unsigned long mmu_cr4_features __read_only;
+ #endif
+ /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
+@@ -761,7 +764,7 @@ static void __init trim_bios_range(void)
+        * area (640->1Mb) as ram even though it is not.
+        * take them out.
+        */
+-      e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++      e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+@@ -769,7 +772,7 @@ static void __init trim_bios_range(void)
+ /* called before trim_bios_range() to spare extra sanitize */
+ static void __init e820_add_kernel_range(void)
+ {
+-      u64 start = __pa_symbol(_text);
++      u64 start = __pa_symbol(ktla_ktva((unsigned long)_text));
+       u64 size = __pa_symbol(_end) - start;
+       /*
+@@ -850,8 +853,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+ void __init setup_arch(char **cmdline_p)
+ {
+-      memblock_reserve(__pa_symbol(_text),
+-                       (unsigned long)__bss_stop - (unsigned long)_text);
++      memblock_reserve(__pa_symbol(ktla_ktva((unsigned long)_text)),
++                       (unsigned long)__bss_stop - ktla_ktva((unsigned long)_text));
+       early_reserve_initrd();
+@@ -944,16 +947,16 @@ void __init setup_arch(char **cmdline_p)
+       if (!boot_params.hdr.root_flags)
+               root_mountflags &= ~MS_RDONLY;
+-      init_mm.start_code = (unsigned long) _text;
+-      init_mm.end_code = (unsigned long) _etext;
+-      init_mm.end_data = (unsigned long) _edata;
++      init_mm.start_code = ktla_ktva((unsigned long)_text);
++      init_mm.end_code = ktla_ktva((unsigned long)_etext);
++      init_mm.end_data = (unsigned long)_edata;
+       init_mm.brk = _brk_end;
+       mpx_mm_init(&init_mm);
+-      code_resource.start = __pa_symbol(_text);
+-      code_resource.end = __pa_symbol(_etext)-1;
+-      data_resource.start = __pa_symbol(_etext);
++      code_resource.start = __pa_symbol(ktla_ktva((unsigned long)_text));
++      code_resource.end = __pa_symbol(ktla_ktva((unsigned long)_etext))-1;
++      data_resource.start = __pa_symbol(_sdata);
+       data_resource.end = __pa_symbol(_edata)-1;
+       bss_resource.start = __pa_symbol(__bss_start);
+       bss_resource.end = __pa_symbol(__bss_stop)-1;
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index 7a40e06..f60ccfe 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -21,19 +21,17 @@
+ #include <asm/cpu.h>
+ #include <asm/stackprotector.h>
+-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
++#ifdef CONFIG_SMP
++DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+ DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+       [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
+ {
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+       pg_data_t *last = NULL;
+-      unsigned int cpu;
++      int cpu;
+       for_each_possible_cpu(cpu) {
+               int node = early_cpu_to_node(cpu);
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+       struct desc_struct gdt;
++      unsigned long base = per_cpu_offset(cpu);
+-      pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+-                      0x2 | DESCTYPE_S, 0x8);
+-      gdt.s = 1;
++      pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
++                      0x83 | DESCTYPE_S, 0xC);
+       write_gdt_entry(get_cpu_gdt_table(cpu),
+                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ #endif
+@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
+       /* alrighty, percpu areas up and running */
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++              unsigned long canary = per_cpu(stack_canary.canary, cpu);
++#endif
++#endif
+               per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+@@ -261,6 +264,12 @@ void __init setup_per_cpu_areas(void)
+                */
+               set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++              if (!cpu)
++                      per_cpu(stack_canary.canary, cpu) = canary;
++#endif
++#endif
+               /*
+                * Up to this point, the boot CPU has been using .init.data
+                * area.  Reload any changed state for the boot CPU.
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 04cb321..e74f021 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -226,7 +226,7 @@ static unsigned long align_sigframe(unsigned long sp)
+        * Align the stack pointer according to the i386 ABI,
+        * i.e. so that on function entry ((sp + 4) & 15) == 0.
+        */
+-      sp = ((sp + 4) & -16ul) - 4;
++      sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+       sp = round_down(sp, 16) - 8;
+ #endif
+@@ -334,10 +334,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+       }
+       if (current->mm->context.vdso)
+-              restorer = current->mm->context.vdso +
+-                      vdso_image_32.sym___kernel_sigreturn;
++              restorer = (void __force_user *)(current->mm->context.vdso + vdso_image_32.sym___kernel_sigreturn);
+       else
+-              restorer = &frame->retcode;
++              restorer = frame->retcode;
+       if (ksig->ka.sa.sa_flags & SA_RESTORER)
+               restorer = ksig->ka.sa.sa_restorer;
+@@ -351,7 +350,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
+-      err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++      err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+       if (err)
+               return -EFAULT;
+@@ -398,8 +397,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+               save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+               /* Set up to return from userspace.  */
+-              restorer = current->mm->context.vdso +
+-                      vdso_image_32.sym___kernel_rt_sigreturn;
++              if (current->mm->context.vdso)
++                      restorer = (void __force_user *)(current->mm->context.vdso + vdso_image_32.sym___kernel_rt_sigreturn);
++              else
++                      restorer = (void __user *)&frame->retcode;
+               if (ksig->ka.sa.sa_flags & SA_RESTORER)
+                       restorer = ksig->ka.sa.sa_restorer;
+               put_user_ex(restorer, &frame->pretcode);
+@@ -411,7 +412,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+                * reasons and because gdb uses it as a signature to notice
+                * signal handler stack frames.
+                */
+-              put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++              put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+       } put_user_catch(err);
+       
+       err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 658777c..6285f88 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -336,7 +336,7 @@ static int __init nonmi_ipi_setup(char *str)
+ __setup("nonmi_ipi", nonmi_ipi_setup);
+-struct smp_ops smp_ops = {
++struct smp_ops smp_ops __read_only = {
+       .smp_prepare_boot_cpu   = native_smp_prepare_boot_cpu,
+       .smp_prepare_cpus       = native_smp_prepare_cpus,
+       .smp_cpus_done          = native_smp_cpus_done,
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 9e152cd..60ef544 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -225,14 +225,17 @@ static void notrace start_secondary(void *unused)
+       enable_start_cpu0 = 0;
+-#ifdef CONFIG_X86_32
++      /* otherwise gcc will move up smp_processor_id before the cpu_init */
++      barrier();
++
+       /* switch away from the initial page table */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
++#else
+       load_cr3(swapper_pg_dir);
++#endif
+       __flush_tlb_all();
+-#endif
+-      /* otherwise gcc will move up smp_processor_id before the cpu_init */
+-      barrier();
+       /*
+        * Check TSC synchronization with the BP:
+        */
+@@ -935,16 +938,15 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+       alternatives_enable_smp();
+       per_cpu(current_task, cpu) = idle;
++      per_cpu(current_tinfo, cpu) = &idle->tinfo;
+ #ifdef CONFIG_X86_32
+-      /* Stack for startup_32 can be just as for start_secondary onwards */
+       irq_ctx_init(cpu);
+-      per_cpu(cpu_current_top_of_stack, cpu) =
+-              (unsigned long)task_stack_page(idle) + THREAD_SIZE;
+ #else
+       clear_tsk_thread_flag(idle, TIF_FORK);
+       initial_gs = per_cpu_offset(cpu);
+ #endif
++      per_cpu(cpu_current_top_of_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+ }
+ /*
+@@ -965,9 +967,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+       unsigned long timeout;
+       idle->thread.sp = (unsigned long) (((struct pt_regs *)
+-                        (THREAD_SIZE +  task_stack_page(idle))) - 1);
++                        (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
++      pax_open_kernel();
+       early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++      pax_close_kernel();
+       initial_code = (unsigned long)start_secondary;
+       stack_start  = idle->thread.sp;
+@@ -1115,6 +1119,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+       common_cpu_up(cpu, tidle);
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++      clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++#endif
++
+       /*
+        * We have to walk the irq descriptors to setup the vector
+        * space for the cpu which comes online.  Prevent irq
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index c9a0738..f0ab628 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -45,7 +45,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+                       addr += base;
+               }
+               mutex_unlock(&child->mm->context.lock);
+-      }
++      } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++              addr = ktla_ktva(addr);
+ #endif
+       return addr;
+@@ -57,6 +58,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+       unsigned char opcode[15];
+       unsigned long addr = convert_ip_to_linear(child, regs);
++      if (addr == -EINVAL)
++              return 0;
++
+       copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+       for (i = 0; i < copied; i++) {
+               switch (opcode[i]) {
+diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
+new file mode 100644
+index 0000000..920e413
+--- /dev/null
++++ b/arch/x86/kernel/sys_i386_32.c
+@@ -0,0 +1,189 @@
++/*
++ * This file contains various random system calls that
++ * have a non-standard calling sequence on the Linux/i386
++ * platform.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/smp.h>
++#include <linux/sem.h>
++#include <linux/msg.h>
++#include <linux/shm.h>
++#include <linux/stat.h>
++#include <linux/syscalls.h>
++#include <linux/mman.h>
++#include <linux/file.h>
++#include <linux/utsname.h>
++#include <linux/ipc.h>
++#include <linux/elf.h>
++
++#include <linux/uaccess.h>
++#include <linux/unistd.h>
++
++#include <asm/syscalls.h>
++
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++      unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      if (flags & MAP_FIXED)
++              if (len > pax_task_size || addr > pax_task_size - len)
++                      return -EINVAL;
++
++      return 0;
++}
++
++/*
++ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
++ */
++static unsigned long get_align_mask(void)
++{
++      if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
++              return 0;
++
++      if (!(current->flags & PF_RANDOMIZE))
++              return 0;
++
++      return va_align.mask;
++}
++
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++              unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++      struct mm_struct *mm = current->mm;
++      struct vm_area_struct *vma;
++      unsigned long pax_task_size = TASK_SIZE;
++      struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (len > pax_task_size)
++              return -ENOMEM;
++
++      if (flags & MAP_FIXED)
++              return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++      if (addr) {
++              addr = PAGE_ALIGN(addr);
++              if (pax_task_size - len >= addr) {
++                      vma = find_vma(mm, addr);
++                      if (check_heap_stack_gap(vma, addr, len, offset))
++                              return addr;
++              }
++      }
++
++      info.flags = 0;
++      info.length = len;
++      info.align_mask = filp ? get_align_mask() : 0;
++      info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
++              info.low_limit = 0x00110000UL;
++              info.high_limit = mm->start_code;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++              if (info.low_limit < info.high_limit) {
++                      addr = vm_unmapped_area(&info);
++                      if (!IS_ERR_VALUE(addr))
++                              return addr;
++              }
++      } else
++#endif
++
++      info.low_limit = mm->mmap_base;
++      info.high_limit = pax_task_size;
++
++      return vm_unmapped_area(&info);
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++                        unsigned long len, unsigned long pgoff,
++                        unsigned long flags)
++{
++      struct vm_area_struct *vma;
++      struct mm_struct *mm = current->mm;
++      unsigned long addr = addr0, pax_task_size = TASK_SIZE;
++      struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      /* requested length too big for entire address space */
++      if (len > pax_task_size)
++              return -ENOMEM;
++
++      if (flags & MAP_FIXED)
++              return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++              goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++      /* requesting a specific address */
++      if (addr) {
++              addr = PAGE_ALIGN(addr);
++              if (pax_task_size - len >= addr) {
++                      vma = find_vma(mm, addr);
++                      if (check_heap_stack_gap(vma, addr, len, offset))
++                              return addr;
++              }
++      }
++
++      info.flags = VM_UNMAPPED_AREA_TOPDOWN;
++      info.length = len;
++      info.low_limit = PAGE_SIZE;
++      info.high_limit = mm->mmap_base;
++      info.align_mask = filp ? get_align_mask() : 0;
++      info.align_offset = pgoff << PAGE_SHIFT;
++      info.threadstack_offset = offset;
++
++      addr = vm_unmapped_area(&info);
++      if (!(addr & ~PAGE_MASK))
++              return addr;
++      VM_BUG_ON(addr != -ENOMEM);
++
++bottomup:
++      /*
++       * A failed mmap() very likely causes application failure,
++       * so fall back to the bottom-up function here. This scenario
++       * can happen with large stack limits and large mmap()
++       * allocations.
++       */
++      return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++}
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 10e0272..a73232f 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -97,8 +97,8 @@ out:
+       return error;
+ }
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+-                         unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++                         unsigned long *begin, unsigned long *end)
+ {
+       if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
+               unsigned long new_begin;
+@@ -117,7 +117,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+                               *begin = new_begin;
+               }
+       } else {
+-              *begin = current->mm->mmap_legacy_base;
++              *begin = mm->mmap_legacy_base;
+               *end = TASK_SIZE;
+       }
+ }
+@@ -130,20 +130,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
+       unsigned long begin, end;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       if (flags & MAP_FIXED)
+               return addr;
+-      find_start_end(flags, &begin, &end);
++      find_start_end(mm, flags, &begin, &end);
+       if (len > end)
+               return -ENOMEM;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (end - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -157,18 +161,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+               info.align_mask = get_align_mask();
+               info.align_offset += get_align_bits();
+       }
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+-                        const unsigned long len, const unsigned long pgoff,
+-                        const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++                        unsigned long len, unsigned long pgoff,
++                        unsigned long flags)
+ {
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE)
+@@ -181,12 +187,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
+               goto bottomup;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                              (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -200,6 +209,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               info.align_mask = get_align_mask();
+               info.align_offset += get_align_bits();
+       }
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               return addr;
+diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
+index 654f6c6..cb648a2 100644
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -44,6 +44,7 @@
+ #include <asm/setup.h>
+ #include <asm/e820.h>
+ #include <asm/io.h>
++#include <asm/tlbflush.h>
+ #include "../realmode/rm/wakeup.h"
+@@ -145,6 +146,10 @@ static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
+       if (!tboot_pg_dir)
+               return -1;
++      clone_pgd_range(tboot_pg_dir + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++
+       for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
+               if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
+                       return -1;
+@@ -215,8 +220,6 @@ static int tboot_setup_sleep(void)
+ void tboot_shutdown(u32 shutdown_type)
+ {
+-      void (*shutdown)(void);
+-
+       if (!tboot_enabled())
+               return;
+@@ -236,9 +239,12 @@ void tboot_shutdown(u32 shutdown_type)
+       tboot->shutdown_type = shutdown_type;
+       switch_to_tboot_pt();
++      __write_cr4(__read_cr4() & ~X86_CR4_PCIDE);
+-      shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
+-      shutdown();
++      /*
++       * PaX: can't be a C indirect function call due to KERNEXEC
++       */
++      asm volatile("jmp *%0" : : "r"((unsigned long)tboot->shutdown_entry));
+       /* should not reach here */
+       while (1)
+@@ -304,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
+       return -ENODEV;
+ }
+-static atomic_t ap_wfs_count;
++static atomic_unchecked_t ap_wfs_count;
+ static int tboot_wait_for_aps(int num_aps)
+ {
+@@ -325,9 +331,9 @@ static int tboot_wait_for_aps(int num_aps)
+ static int tboot_dying_cpu(unsigned int cpu)
+ {
+-      atomic_inc(&ap_wfs_count);
++      atomic_inc_unchecked(&ap_wfs_count);
+       if (num_online_cpus() == 1) {
+-              if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
++              if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
+                       return -EBUSY;
+       }
+       return 0;
+@@ -407,7 +413,7 @@ static __init int tboot_late_init(void)
+       tboot_create_trampoline();
+-      atomic_set(&ap_wfs_count, 0);
++      atomic_set_unchecked(&ap_wfs_count, 0);
+       cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+                         tboot_dying_cpu);
+ #ifdef CONFIG_DEBUG_FS
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index d39c091..1df4349 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -32,7 +32,7 @@ unsigned long profile_pc(struct pt_regs *regs)
+       if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+-              return *(unsigned long *)(regs->bp + sizeof(long));
++              return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+               unsigned long *sp =
+                       (unsigned long *)kernel_stack_pointer(regs);
+@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
+                * or above a saved flags. Eflags has bits 22-31 zero,
+                * kernel addresses don't.
+                */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              return ktla_ktva(sp[0]);
++#else
+               if (sp[0] >> 22)
+                       return sp[0];
+               if (sp[1] >> 22)
+                       return sp[1];
+ #endif
++
++#endif
+       }
+       return pc;
+ }
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index 9692a5e..aea9fa5 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -140,6 +140,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++              return -EINVAL;
++#endif
++
+       set_tls_desc(p, idx, &info, 1);
+       /*
+@@ -298,7 +303,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+       if (kbuf)
+               info = kbuf;
+-      else if (__copy_from_user(infobuf, ubuf, count))
++      else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
+               return -EFAULT;
+       else
+               info = infobuf;
+diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
+index 1c113db..287b42e 100644
+--- a/arch/x86/kernel/tracepoint.c
++++ b/arch/x86/kernel/tracepoint.c
+@@ -9,11 +9,11 @@
+ #include <linux/atomic.h>
+ atomic_t trace_idt_ctr = ATOMIC_INIT(0);
+-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
++const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
+                               (unsigned long) trace_idt_table };
+ /* No need to be aligned, but done to keep all IDTs defined the same way. */
+-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
++gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
+ static int trace_irq_vector_refcount;
+ static DEFINE_MUTEX(irq_vector_mutex);
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index b70ca12..2eb1474 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -71,7 +71,7 @@
+ #include <asm/proto.h>
+ /* No need to be aligned, but done to keep all IDTs defined the same way. */
+-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
++gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
+ #else
+ #include <asm/processor-flags.h>
+ #include <asm/setup.h>
+@@ -79,7 +79,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
+ #endif
+ /* Must be page-aligned because the real IDT is used in a fixmap. */
+-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
++gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+ EXPORT_SYMBOL_GPL(used_vectors);
+@@ -169,7 +169,7 @@ void ist_end_non_atomic(void)
+ }
+ static nokprobe_inline int
+-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
++do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
+                 struct pt_regs *regs, long error_code)
+ {
+       if (v8086_mode(regs)) {
+@@ -189,8 +189,25 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+               if (!fixup_exception(regs, trapnr)) {
+                       tsk->thread.error_code = error_code;
+                       tsk->thread.trap_nr = trapnr;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++                      if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
++                              str = "PAX: suspicious stack segment fault";
++#endif
++
++#ifdef CONFIG_PAX_RAP
++                      if (trapnr == X86_TRAP_UD)
++                              str = "PAX: overwritten function pointer or return address detected";
++#endif
++
+                       die(str, regs, error_code);
+               }
++
++#ifdef CONFIG_PAX_REFCOUNT
++              if (trapnr == X86_REFCOUNT_VECTOR)
++                      pax_report_refcount_error(regs, str);
++#endif
++
+               return 0;
+       }
+@@ -229,7 +246,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
+ }
+ static void
+-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
+       long error_code, siginfo_t *info)
+ {
+       struct task_struct *tsk = current;
+@@ -252,7 +269,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+       if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
+           printk_ratelimit()) {
+               pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+-                      tsk->comm, tsk->pid, str,
++                      tsk->comm, task_pid_nr(tsk), str,
+                       regs->ip, regs->sp, error_code);
+               print_vma_addr(" in ", regs->ip);
+               pr_cont("\n");
+@@ -262,7 +279,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+ }
+ NOKPROBE_SYMBOL(do_trap);
+-static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
++static void do_error_trap(struct pt_regs *regs, long error_code, const char *str,
+                         unsigned long trapnr, int signr)
+ {
+       siginfo_t info;
+@@ -292,6 +309,37 @@ DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present", segment_not_present)
+ DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",           stack_segment)
+ DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",         alignment_check)
++#ifdef CONFIG_PAX_REFCOUNT
++extern char __refcount_overflow_start[], __refcount_overflow_end[];
++extern char __refcount64_overflow_start[], __refcount64_overflow_end[];
++extern char __refcount_underflow_start[], __refcount_underflow_end[];
++extern char __refcount64_underflow_start[], __refcount64_underflow_end[];
++
++dotraplinkage void do_refcount_error(struct pt_regs *regs, long error_code)
++{
++      const char *str = NULL;
++
++      BUG_ON(!(regs->flags & X86_EFLAGS_OF));
++
++#define range_check(size, direction, type, value) \
++      if ((unsigned long)__##size##_##direction##_start <= regs->ip && \
++          regs->ip < (unsigned long)__##size##_##direction##_end) { \
++              *(type *)regs->cx = value; \
++              str = #size " " #direction; \
++      }
++
++      range_check(refcount,   overflow,  int,       INT_MAX)
++      range_check(refcount64, overflow,  long long, LLONG_MAX)
++      range_check(refcount,   underflow, int,       INT_MIN)
++      range_check(refcount64, underflow, long long, LLONG_MIN)
++
++#undef range_check
++
++      BUG_ON(!str);
++      do_error_trap(regs, error_code, str, X86_REFCOUNT_VECTOR, SIGILL);
++}
++#endif
++
+ #ifdef CONFIG_X86_64
+ /* Runs on IST stack */
+ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+@@ -332,6 +380,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_nr = X86_TRAP_DF;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
++              die("grsec: kernel stack overflow detected", regs, error_code); 
++#endif
++
+ #ifdef CONFIG_DOUBLEFAULT
+       df_debug(regs, error_code);
+ #endif
+@@ -444,11 +497,35 @@ do_general_protection(struct pt_regs *regs, long error_code)
+               tsk->thread.error_code = error_code;
+               tsk->thread.trap_nr = X86_TRAP_GP;
+               if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+-                             X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
++                             X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++                      if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++                              die("PAX: suspicious general protection fault", regs, error_code);
++                      else
++#endif
++
+                       die("general protection fault", regs, error_code);
++              }
+               return;
+       }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++      if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++              struct mm_struct *mm = tsk->mm;
++              unsigned long limit;
++
++              down_write(&mm->mmap_sem);
++              limit = mm->context.user_cs_limit;
++              if (limit < TASK_SIZE) {
++                      track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++                      up_write(&mm->mmap_sem);
++                      return;
++              }
++              up_write(&mm->mmap_sem);
++      }
++#endif
++
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_nr = X86_TRAP_GP;
+@@ -546,6 +623,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+               container_of(task_pt_regs(current),
+                            struct bad_iret_stack, regs);
++      if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
++              new_stack = s;
++
+       /* Copy the IRET target to the new stack. */
+       memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+@@ -717,7 +797,7 @@ exit:
+        * This is the most likely code path that involves non-trivial use
+        * of the SYSENTER stack.  Check that we haven't overrun it.
+        */
+-      WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
++      WARN(cpu_tss[raw_smp_processor_id()].SYSENTER_stack_canary != STACK_END_MAGIC,
+            "Overran or corrupted SYSENTER stack\n");
+ #endif
+       ist_exit(regs);
+@@ -847,7 +927,7 @@ void __init early_trap_init(void)
+        * since we don't have trace_debug and it will be reset to
+        * 'debug' in trap_init() by set_intr_gate_ist().
+        */
+-      set_intr_gate_notrace(X86_TRAP_DB, debug);
++      set_intr_gate_notrace(X86_TRAP_DB, int1);
+       /* int3 can be called from all */
+       set_system_intr_gate(X86_TRAP_BP, &int3);
+ #ifdef CONFIG_X86_32
+@@ -914,6 +994,11 @@ void __init trap_init(void)
+       set_bit(IA32_SYSCALL_VECTOR, used_vectors);
+ #endif
++#ifdef CONFIG_PAX_REFCOUNT
++      set_intr_gate(X86_REFCOUNT_VECTOR, refcount_error);
++      set_bit(X86_REFCOUNT_VECTOR, used_vectors);
++#endif
++
+       /*
+        * Set the IDT descriptor to a fixed read-only location, so that the
+        * "sidt" instruction will not leak the location of the kernel, and
+@@ -932,7 +1017,7 @@ void __init trap_init(void)
+        * in early_trap_init(). However, ITS works only after
+        * cpu_init() loads TSS. See comments in early_trap_init().
+        */
+-      set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
++      set_intr_gate_ist(X86_TRAP_DB, &int1, DEBUG_STACK);
+       /* int3 can be called from all */
+       set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+@@ -940,7 +1025,7 @@ void __init trap_init(void)
+ #ifdef CONFIG_X86_64
+       memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
+-      set_nmi_gate(X86_TRAP_DB, &debug);
++      set_nmi_gate(X86_TRAP_DB, &int1);
+       set_nmi_gate(X86_TRAP_BP, &int3);
+ #endif
+ }
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 78b9cb5..79fb053 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -157,7 +157,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
+        */
+       smp_wmb();
+-      ACCESS_ONCE(c2n->head) = data;
++      ACCESS_ONCE_RW(c2n->head) = data;
+ }
+ /*
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 495c776..c0427ef 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -287,7 +287,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
+ {
+       u32 volatile *good_insns;
+-      insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
++      insn_init(insn, (void *)ktva_ktla((unsigned long)auprobe->insn), sizeof(auprobe->insn), x86_64);
+       /* has the side-effect of processing the entire instruction */
+       insn_get_length(insn);
+       if (WARN_ON_ONCE(!insn_complete(insn)))
+@@ -978,7 +978,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
+       if (nleft != rasize) {
+               pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
+-                      "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
++                      "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
+               force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
+       }
+diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
+index 014ea59..03cfe40 100644
+--- a/arch/x86/kernel/verify_cpu.S
++++ b/arch/x86/kernel/verify_cpu.S
+@@ -20,6 +20,7 @@
+  *    arch/x86/boot/compressed/head_64.S: Boot cpu verification
+  *    arch/x86/kernel/trampoline_64.S: secondary processor verification
+  *    arch/x86/kernel/head_32.S: processor startup
++ *    arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
+  *
+  *    verify_cpu, returns the status of longmode and SSE in register %eax.
+  *            0: Success    1: Failure
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 01f30e5..a304a4c 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -144,7 +144,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
+               do_exit(SIGSEGV);
+       }
+-      tss = &per_cpu(cpu_tss, get_cpu());
++      tss = cpu_tss + get_cpu();
+       tsk->thread.sp0 = vm86->saved_sp0;
+       tsk->thread.sysenter_cs = __KERNEL_CS;
+       load_sp0(tss, &tsk->thread);
+@@ -176,10 +176,8 @@ static void mark_screen_rdonly(struct mm_struct *mm)
+               goto out;
+       pmd = pmd_offset(pud, 0xA0000);
+-      if (pmd_trans_huge(*pmd)) {
+-              struct vm_area_struct *vma = find_vma(mm, 0xA0000);
+-              split_huge_pmd(vma, pmd, 0xA0000);
+-      }
++      if (pmd_trans_huge(*pmd))
++              split_huge_pmd(find_vma(mm, 0xA0000), pmd, 0xA0000);
+       if (pmd_none_or_clear_bad(pmd))
+               goto out;
+       pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
+@@ -263,6 +261,13 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
+               return -EPERM;
+       }
++#ifdef CONFIG_GRKERNSEC_VM86
++      if (!capable(CAP_SYS_RAWIO)) {
++              gr_handle_vm86();
++              return -EPERM;
++      }
++#endif
++
+       if (!vm86) {
+               if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
+                       return -ENOMEM;
+@@ -358,7 +363,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
+       vm86->saved_sp0 = tsk->thread.sp0;
+       lazy_save_gs(vm86->regs32.gs);
+-      tss = &per_cpu(cpu_tss, get_cpu());
++      tss = cpu_tss + get_cpu();
+       /* make room for real-mode segments */
+       tsk->thread.sp0 += 16;
+@@ -538,7 +543,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+               goto cannot_handle;
+       if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
+               goto cannot_handle;
+-      intr_ptr = (unsigned long __user *) (i << 2);
++      intr_ptr = (unsigned long __force_user *) (i << 2);
+       if (get_user(segoffs, intr_ptr))
+               goto cannot_handle;
+       if ((segoffs >> 16) == BIOSSEG)
+@@ -831,6 +836,14 @@ static inline int get_and_reset_irq(int irqnumber)
+ static int do_vm86_irq_handling(int subfunction, int irqnumber)
+ {
+       int ret;
++
++#ifdef CONFIG_GRKERNSEC_VM86
++      if (!capable(CAP_SYS_RAWIO)) {
++              gr_handle_vm86();
++              return -EPERM;
++      }
++#endif
++
+       switch (subfunction) {
+               case VM86_GET_AND_RESET_IRQ: {
+                       return get_and_reset_irq(irqnumber);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 9297a00..3dc41ac 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -26,6 +26,13 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET  (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET  0
++#endif
+ #undef i386     /* in case the preprocessor is a 32bit one */
+@@ -68,30 +75,44 @@ jiffies_64 = jiffies;
+ PHDRS {
+       text PT_LOAD FLAGS(5);          /* R_E */
++#ifdef CONFIG_X86_32
++      module PT_LOAD FLAGS(5);        /* R_E */
++#endif
++#ifdef CONFIG_XEN
++      rodata PT_LOAD FLAGS(5);        /* R_E */
++#else
++      rodata PT_LOAD FLAGS(4);        /* R__ */
++#endif
+       data PT_LOAD FLAGS(6);          /* RW_ */
+-#ifdef CONFIG_X86_64
++      init.begin PT_LOAD FLAGS(6);    /* RW_ */
+ #ifdef CONFIG_SMP
+       percpu PT_LOAD FLAGS(6);        /* RW_ */
+ #endif
+-      init PT_LOAD FLAGS(7);          /* RWE */
+-#endif
++      text.init PT_LOAD FLAGS(5);     /* R_E */
++      text.exit PT_LOAD FLAGS(5);     /* R_E */
++      init PT_LOAD FLAGS(6);          /* RW_ */
+       note PT_NOTE FLAGS(0);          /* ___ */
+ }
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+-      . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+-      phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
++      . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+       . = __START_KERNEL;
+-      phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
+ #endif
+       /* Text and read-only data */
+-      .text :  AT(ADDR(.text) - LOAD_OFFSET) {
+-              _text = .;
++      .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+               /* bootstrapping code */
++#ifdef CONFIG_X86_32
++              phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET);
++              __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++              phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET);
++              __LOAD_PHYSICAL_ADDR = ABSOLUTE(. - LOAD_OFFSET + __KERNEL_TEXT_OFFSET);
++#endif
++              _text = .;
+               HEAD_TEXT
+               . = ALIGN(8);
+               _stext = .;
+@@ -104,13 +125,35 @@ SECTIONS
+               SOFTIRQENTRY_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+-              /* End of text section */
+-              _etext = .;
+       } :text = 0x9090
+-      NOTES :text :note
++      . += __KERNEL_TEXT_OFFSET;
+-      EXCEPTION_TABLE(16) :text = 0x9090
++#ifdef CONFIG_X86_32
++      . = ALIGN(PAGE_SIZE);
++      .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++              MODULES_EXEC_VADDR = .;
++              BYTE(0)
++              . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
++              . = ALIGN(HPAGE_SIZE) - 1;
++              MODULES_EXEC_END = .;
++#endif
++
++      } :module
++#endif
++
++      .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
++              /* End of text section */
++              BYTE(0)
++              _etext = . - __KERNEL_TEXT_OFFSET;
++      }
++
++      . = ALIGN(PAGE_SIZE);
++      NOTES :rodata :note
++
++      EXCEPTION_TABLE(16) :rodata
+       /* .text should occupy whole number of pages */
+       . = ALIGN(PAGE_SIZE);
+@@ -120,16 +163,20 @@ SECTIONS
+       /* Data */
+       .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++              . = ALIGN(HPAGE_SIZE);
++#else
++              . = ALIGN(PAGE_SIZE);
++#endif
++
+               /* Start of data section */
+               _sdata = .;
+               /* init_task */
+               INIT_TASK_DATA(THREAD_SIZE)
+-#ifdef CONFIG_X86_32
+-              /* 32 bit has nosave before _edata */
+               NOSAVE_DATA
+-#endif
+               PAGE_ALIGNED_DATA(PAGE_SIZE)
+@@ -172,12 +219,19 @@ SECTIONS
+        . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
+       /* Init code and data - will be freed after init */
+-      . = ALIGN(PAGE_SIZE);
+       .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++              BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++              . = ALIGN(HPAGE_SIZE);
++#else
++              . = ALIGN(PAGE_SIZE);
++#endif
++
+               __init_begin = .; /* paired with __init_end */
+-      }
++      } :init.begin
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+       /*
+        * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+        * output PHDR, so the next output section - .init.text - should
+@@ -188,10 +242,13 @@ SECTIONS
+              "per-CPU data too large - increase CONFIG_PHYSICAL_START")
+ #endif
+-      INIT_TEXT_SECTION(PAGE_SIZE)
+-#ifdef CONFIG_X86_64
+-      :init
+-#endif
++      . = ALIGN(PAGE_SIZE);
++      init_begin = .;
++      .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
++              VMLINUX_SYMBOL(_sinittext) = .;
++              INIT_TEXT
++              . = ALIGN(PAGE_SIZE);
++      } :text.init
+       /*
+        * Section for code used exclusively before alternatives are run. All
+@@ -200,11 +257,29 @@ SECTIONS
+        *
+        * See static_cpu_has() for an example.
+        */
+-      .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
++      .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+               *(.altinstr_aux)
+       }
+-      INIT_DATA_SECTION(16)
++      /*
++       * .exit.text is discard at runtime, not link time, to deal with
++       *  references from .altinstructions and .eh_frame
++       */
++      .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++              EXIT_TEXT
++              VMLINUX_SYMBOL(_einittext) = .;
++
++#ifdef CONFIG_PAX_KERNEXEC
++              . = ALIGN(HPAGE_SIZE);
++#else
++              . = ALIGN(16);
++#endif
++
++      } :text.exit
++      . = init_begin + SIZEOF(.init.text) + SIZEOF(.altinstr_aux) + SIZEOF(.exit.text);
++
++      . = ALIGN(PAGE_SIZE);
++      INIT_DATA_SECTION(16) :init
+       .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+               __x86_cpu_dev_start = .;
+@@ -275,19 +350,12 @@ SECTIONS
+       }
+       . = ALIGN(8);
+-      /*
+-       * .exit.text is discard at runtime, not link time, to deal with
+-       *  references from .altinstructions and .eh_frame
+-       */
+-      .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+-              EXIT_TEXT
+-      }
+       .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+               EXIT_DATA
+       }
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+       PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+ #endif
+@@ -306,16 +374,10 @@ SECTIONS
+       .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+               __smp_locks = .;
+               *(.smp_locks)
+-              . = ALIGN(PAGE_SIZE);
+               __smp_locks_end = .;
++              . = ALIGN(PAGE_SIZE);
+       }
+-#ifdef CONFIG_X86_64
+-      .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+-              NOSAVE_DATA
+-      }
+-#endif
+-
+       /* BSS */
+       . = ALIGN(PAGE_SIZE);
+       .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -331,6 +393,7 @@ SECTIONS
+               __brk_base = .;
+               . += 64 * 1024;         /* 64k alignment slop space */
+               *(.brk_reservation)     /* areas brk users have reserved */
++              . = ALIGN(HPAGE_SIZE);
+               __brk_limit = .;
+       }
+@@ -361,13 +424,12 @@ SECTIONS
+  * for the boot processor.
+  */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+ /*
+  * Build-time check on the image size:
+  */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+          "kernel image bigger than KERNEL_IMAGE_SIZE");
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
+index 95e49f6..975337d 100644
+--- a/arch/x86/kernel/x8664_ksyms_64.c
++++ b/arch/x86/kernel/x8664_ksyms_64.c
+@@ -35,8 +35,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
+ EXPORT_SYMBOL(copy_user_generic_unrolled);
+ EXPORT_SYMBOL(copy_user_enhanced_fast_string);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(_copy_from_user);
+-EXPORT_SYMBOL(_copy_to_user);
+ EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+@@ -83,3 +81,7 @@ EXPORT_SYMBOL(native_load_gs_index);
+ EXPORT_SYMBOL(___preempt_schedule);
+ EXPORT_SYMBOL(___preempt_schedule_notrace);
+ #endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 76c5e52..6f2af84 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -91,7 +91,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
+ static void default_nmi_init(void) { };
+ static int default_i8042_detect(void) { return 1; };
+-struct x86_platform_ops x86_platform = {
++struct x86_platform_ops x86_platform __read_only = {
+       .calibrate_cpu                  = native_calibrate_cpu,
+       .calibrate_tsc                  = native_calibrate_tsc,
+       .get_wallclock                  = mach_get_cmos_time,
+@@ -108,7 +108,7 @@ struct x86_platform_ops x86_platform = {
+ EXPORT_SYMBOL_GPL(x86_platform);
+ #if defined(CONFIG_PCI_MSI)
+-struct x86_msi_ops x86_msi = {
++struct x86_msi_ops x86_msi __read_only = {
+       .setup_msi_irqs         = native_setup_msi_irqs,
+       .teardown_msi_irq       = native_teardown_msi_irq,
+       .teardown_msi_irqs      = default_teardown_msi_irqs,
+@@ -137,7 +137,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
+ }
+ #endif
+-struct x86_io_apic_ops x86_io_apic_ops = {
++struct x86_io_apic_ops x86_io_apic_ops __read_only = {
+       .read                   = native_io_apic_read,
+       .disable                = native_disable_io_apic,
+ };
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 3235e0f..60b5e71 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -224,15 +224,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+ {
+-      int r;
++      int r, i;
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -EFAULT;
+-      if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+-                         cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++      if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
++      for (i = 0; i < cpuid->nent; ++i) {
++              struct kvm_cpuid_entry2 cpuid_entry;
++              if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
++                      goto out;
++              vcpu->arch.cpuid_entries[i] = cpuid_entry;
++      }
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+@@ -245,15 +250,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+ {
+-      int r;
++      int r, i;
+       r = -E2BIG;
+       if (cpuid->nent < vcpu->arch.cpuid_nent)
+               goto out;
+       r = -EFAULT;
+-      if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+-                       vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++      if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
++      for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++              struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
++              if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
++                      goto out;
++      }
+       return 0;
+ out:
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 4e95d3e..e3e58b1 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -971,7 +971,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
+ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
+ {
+       u8 rc;
+-      void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
++      void (*fop)(struct fastop *) = (void *)em_setcc + 4 * (condition & 0xf);
+       flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
+       asm("push %[flags]; popf; call *%[fastop]"
+@@ -1893,7 +1893,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
+ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
+ {
+       int seg = ctxt->src2.val;
+-      unsigned long selector;
++      u16 selector;
+       int rc;
+       rc = emulate_pop(ctxt, &selector, 2);
+@@ -1905,7 +1905,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
+       if (ctxt->op_bytes > 2)
+               rsp_increment(ctxt, ctxt->op_bytes - 2);
+-      rc = load_segment_descriptor(ctxt, (u16)selector, seg);
++      rc = load_segment_descriptor(ctxt, selector, seg);
+       return rc;
+ }
+@@ -3882,7 +3882,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+       int cr = ctxt->modrm_reg;
+       u64 efer = 0;
+-      static u64 cr_reserved_bits[] = {
++      static const u64 cr_reserved_bits[] = {
+               0xffffffff00000000ULL,
+               0, 0, 0, /* CR3 checked later */
+               CR4_RESERVED_BITS,
+@@ -4968,7 +4968,10 @@ done_prefixes:
+       if (ctxt->d == 0)
+               return EMULATION_FAILED;
+-      ctxt->execute = opcode.u.execute;
++      if (ctxt->d & Fastop)
++              ctxt->u.fastop = opcode.u.fastop;
++      else
++              ctxt->u.execute = opcode.u.execute;
+       if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+               return EMULATION_FAILED;
+@@ -5283,15 +5286,14 @@ special_insn:
+       else
+               ctxt->eflags &= ~X86_EFLAGS_RF;
+-      if (ctxt->execute) {
++      if (ctxt->u.execute) {
+               if (ctxt->d & Fastop) {
+-                      void (*fop)(struct fastop *) = (void *)ctxt->execute;
+-                      rc = fastop(ctxt, fop);
++                      rc = fastop(ctxt, ctxt->u.fastop);
+                       if (rc != X86EMUL_CONTINUE)
+                               goto done;
+                       goto writeback;
+               }
+-              rc = ctxt->execute(ctxt);
++              rc = ctxt->u.execute(ctxt);
+               if (rc != X86EMUL_CONTINUE)
+                       goto done;
+               goto writeback;
+diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
+index 7cc2360..6ae1236 100644
+--- a/arch/x86/kvm/i8259.c
++++ b/arch/x86/kvm/i8259.c
+@@ -39,14 +39,14 @@
+ static void pic_irq_request(struct kvm *kvm, int level);
++static void pic_lock(struct kvm_pic *s) __acquires(&s->lock);
+ static void pic_lock(struct kvm_pic *s)
+-      __acquires(&s->lock)
+ {
+       spin_lock(&s->lock);
+ }
++static void pic_unlock(struct kvm_pic *s) __releases(&s->lock);
+ static void pic_unlock(struct kvm_pic *s)
+-      __releases(&s->lock)
+ {
+       bool wakeup = s->wakeup_needed;
+       struct kvm_vcpu *vcpu, *found = NULL;
+@@ -72,6 +72,7 @@ static void pic_unlock(struct kvm_pic *s)
+       }
+ }
++static void pic_clear_isr(struct kvm_kpic_state *s, int irq) __must_hold(s->pics_state);
+ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
+ {
+       s->isr &= ~(1 << irq);
+@@ -219,6 +220,7 @@ void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
+ /*
+  * acknowledge interrupt 'irq'
+  */
++static inline void pic_intack(struct kvm_kpic_state *s, int irq) __must_hold(s);
+ static inline void pic_intack(struct kvm_kpic_state *s, int irq)
+ {
+       s->isr |= 1 << irq;
+@@ -273,6 +275,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
+       return intno;
+ }
++void kvm_pic_reset(struct kvm_kpic_state *s) __must_hold(s);
+ void kvm_pic_reset(struct kvm_kpic_state *s)
+ {
+       int irq, i;
+@@ -307,6 +310,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
+                       pic_clear_isr(s, irq);
+ }
++static void pic_ioport_write(void *opaque, u32 addr, u32 val) __must_hold(opaque);
+ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
+ {
+       struct kvm_kpic_state *s = opaque;
+@@ -400,6 +404,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
+               }
+ }
++static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) __must_hold(s);
+ static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
+ {
+       int ret;
+@@ -422,6 +427,7 @@ static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
+       return ret;
+ }
++static u32 pic_ioport_read(void *opaque, u32 addr1) __must_hold(opaque);
+ static u32 pic_ioport_read(void *opaque, u32 addr1)
+ {
+       struct kvm_kpic_state *s = opaque;
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 1a22de7..699421c 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -415,6 +415,8 @@ static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
+ #define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
+ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
++                      struct kvm_ioapic *ioapic, int vector, int trigger_mode) __must_hold(&ioapic->lock);
++static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
+                       struct kvm_ioapic *ioapic, int vector, int trigger_mode)
+ {
+       struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index b62c852..bbf49f2 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -57,7 +57,7 @@
+ #define APIC_BUS_CYCLE_NS 1
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
+ #define APIC_VERSION                  (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index a011054..da14b47 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -355,7 +355,7 @@ retry_walk:
+               if (unlikely(kvm_is_error_hva(host_addr)))
+                       goto error;
+-              ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
++              ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
+               if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+                       goto error;
+               walker->ptep_user[walker->level - 1] = ptep_user;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index af523d8..ba7da48 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4120,7 +4120,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+       int cpu = raw_smp_processor_id();
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++
++      pax_open_kernel();
+       sd->tss_desc->type = 9; /* available 32/64-bit TSS */
++      pax_close_kernel();
++
+       load_TR_desc();
+ }
+@@ -4559,6 +4563,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ #endif
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+       reload_tss(vcpu);
+       local_irq_disable();
+@@ -4961,7 +4969,7 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
+       avic_handle_ldr_update(vcpu);
+ }
+-static struct kvm_x86_ops svm_x86_ops = {
++static struct kvm_x86_ops svm_x86_ops __read_only = {
+       .cpu_has_kvm_support = has_svm,
+       .disabled_by_bios = is_disabled,
+       .hardware_setup = svm_hardware_setup,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 5cede40..f932797 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1666,14 +1666,14 @@ static __always_inline void vmcs_writel(unsigned long field, unsigned long value
+       __vmcs_writel(field, value);
+ }
+-static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
++static __always_inline void vmcs_clear_bits(unsigned long field, unsigned long mask)
+ {
+         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
+                        "vmcs_clear_bits does not support 64-bit fields");
+       __vmcs_writel(field, __vmcs_readl(field) & ~mask);
+ }
+-static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
++static __always_inline void vmcs_set_bits(unsigned long field, unsigned long mask)
+ {
+         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
+                        "vmcs_set_bits does not support 64-bit fields");
+@@ -1952,7 +1952,11 @@ static void reload_tss(void)
+       struct desc_struct *descs;
+       descs = (void *)gdt->address;
++
++      pax_open_kernel();
+       descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++      pax_close_kernel();
++
+       load_TR_desc();
+ }
+@@ -2256,6 +2260,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+               vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+               vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
++#endif
++
+               rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+@@ -2580,7 +2588,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
+  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
+  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
+  */
+-static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
++static u64 __intentional_overflow(-1) guest_read_tsc(struct kvm_vcpu *vcpu)
+ {
+       u64 host_tsc, tsc_offset;
+@@ -4840,7 +4848,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+       unsigned long cr4;
+       vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+       vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
++#endif
+       /* Save the most likely value for this task's CR4 in the VMCS. */
+       cr4 = cr4_read_shadow();
+@@ -4867,7 +4878,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+       vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
+       vmx->host_idt_base = dt.address;
+-      vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
++      vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
+       rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+       vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+@@ -6428,11 +6439,17 @@ static __init int hardware_setup(void)
+        * page upon invalidation.  No need to do anything if not
+        * using the APIC_ACCESS_ADDR VMCS field.
+        */
+-      if (!flexpriority_enabled)
++      if (!flexpriority_enabled) {
++              pax_open_kernel();
+               kvm_x86_ops->set_apic_access_page_addr = NULL;
++              pax_close_kernel();
++      }
+-      if (!cpu_has_vmx_tpr_shadow())
++      if (!cpu_has_vmx_tpr_shadow()) {
++              pax_open_kernel();
+               kvm_x86_ops->update_cr8_intercept = NULL;
++              pax_close_kernel();
++      }
+       if (enable_ept && !cpu_has_vmx_ept_2m_page())
+               kvm_disable_largepages();
+@@ -6498,10 +6515,12 @@ static __init int hardware_setup(void)
+               enable_pml = 0;
+       if (!enable_pml) {
++              pax_open_kernel();
+               kvm_x86_ops->slot_enable_log_dirty = NULL;
+               kvm_x86_ops->slot_disable_log_dirty = NULL;
+               kvm_x86_ops->flush_log_dirty = NULL;
+               kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
++              pax_close_kernel();
+       }
+       if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
+@@ -8890,6 +8909,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+               "jmp 2f \n\t"
+               "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+               "2: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              "ljmp %[cs],$3f\n\t"
++              "3: "
++#endif
++
+               /* Save guest registers, load host registers, keep flags */
+               "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
+               "pop %0 \n\t"
+@@ -8942,6 +8967,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+               [wordsize]"i"(sizeof(ulong))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              ,[cs]"i"(__KERNEL_CS)
++#endif
++
+             : "cc", "memory"
+ #ifdef CONFIG_X86_64
+               , "rax", "rbx", "rdi", "rsi"
+@@ -8955,7 +8985,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+       if (debugctlmsr)
+               update_debugctlmsr(debugctlmsr);
+-#ifndef CONFIG_X86_64
++#ifdef CONFIG_X86_32
+       /*
+        * The sysexit path does not restore ds/es, so we must set them to
+        * a reasonable value ourselves.
+@@ -8964,8 +8994,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+        * may be executed in interrupt context, which saves and restore segments
+        * around it, nullifying its effect.
+        */
+-      loadsegment(ds, __USER_DS);
+-      loadsegment(es, __USER_DS);
++      loadsegment(ds, __KERNEL_DS);
++      loadsegment(es, __KERNEL_DS);
++      loadsegment(ss, __KERNEL_DS);
++
++#ifdef CONFIG_PAX_KERNEXEC
++      loadsegment(fs, __KERNEL_PERCPU);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ #endif
+       vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
+@@ -11177,7 +11217,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
+                       ~FEATURE_CONTROL_LMCE;
+ }
+-static struct kvm_x86_ops vmx_x86_ops = {
++static struct kvm_x86_ops vmx_x86_ops __read_only = {
+       .cpu_has_kvm_support = cpu_has_kvm_support,
+       .disabled_by_bios = vmx_disabled_by_bios,
+       .hardware_setup = hardware_setup,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 699f872..52b660d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1948,8 +1948,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       int lm = is_long_mode(vcpu);
+-      u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+-              : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
++      u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
++              : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+       u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
+               : kvm->arch.xen_hvm_config.blob_size_32;
+       u32 page_num = data & ~PAGE_MASK;
+@@ -2657,6 +2657,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+               if (n < msr_list.nmsrs)
+                       goto out;
+               r = -EFAULT;
++              if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
++                      goto out;
+               if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+                                num_msrs_to_save * sizeof(u32)))
+                       goto out;
+@@ -3073,7 +3075,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ {
+-      struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
++      struct xregs_state *xsave = &vcpu->arch.guest_fpu.state->xsave;
+       u64 xstate_bv = xsave->header.xfeatures;
+       u64 valid;
+@@ -3109,7 +3111,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+ {
+-      struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
++      struct xregs_state *xsave = &vcpu->arch.guest_fpu.state->xsave;
+       u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+       u64 valid;
+@@ -3153,7 +3155,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+               fill_xsave((u8 *) guest_xsave->region, vcpu);
+       } else {
+               memcpy(guest_xsave->region,
+-                      &vcpu->arch.guest_fpu.state.fxsave,
++                      &vcpu->arch.guest_fpu.state->fxsave,
+                       sizeof(struct fxregs_state));
+               *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
+                       XFEATURE_MASK_FPSSE;
+@@ -3178,7 +3180,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+       } else {
+               if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+                       return -EINVAL;
+-              memcpy(&vcpu->arch.guest_fpu.state.fxsave,
++              memcpy(&vcpu->arch.guest_fpu.state->fxsave,
+                       guest_xsave->region, sizeof(struct fxregs_state));
+       }
+       return 0;
+@@ -5739,7 +5741,7 @@ static unsigned long kvm_get_guest_ip(void)
+       unsigned long ip = 0;
+       if (__this_cpu_read(current_vcpu))
+-              ip = kvm_rip_read(__this_cpu_read(current_vcpu));
++              ip = kvm_get_linear_rip(__this_cpu_read(current_vcpu));
+       return ip;
+ }
+@@ -6462,6 +6464,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+  * exiting to the userspace.  Otherwise, the value will be returned to the
+  * userspace.
+  */
++static int vcpu_enter_guest(struct kvm_vcpu *vcpu) __must_hold(&vcpu->kvm->srcu);
+ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ {
+       int r;
+@@ -6737,6 +6740,7 @@ out:
+       return r;
+ }
++static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) __must_hold(&kvm->srcu);
+ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+ {
+       if (!kvm_arch_vcpu_runnable(vcpu) &&
+@@ -7284,7 +7288,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+       struct fxregs_state *fxsave =
+-                      &vcpu->arch.guest_fpu.state.fxsave;
++                      &vcpu->arch.guest_fpu.state->fxsave;
+       memcpy(fpu->fpr, fxsave->st_space, 128);
+       fpu->fcw = fxsave->cwd;
+@@ -7301,7 +7305,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+       struct fxregs_state *fxsave =
+-                      &vcpu->arch.guest_fpu.state.fxsave;
++                      &vcpu->arch.guest_fpu.state->fxsave;
+       memcpy(fxsave->st_space, fpu->fpr, 128);
+       fxsave->cwd = fpu->fcw;
+@@ -7317,9 +7321,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ static void fx_init(struct kvm_vcpu *vcpu)
+ {
+-      fpstate_init(&vcpu->arch.guest_fpu.state);
++      fpstate_init(vcpu->arch.guest_fpu.state);
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
+-              vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
++              vcpu->arch.guest_fpu.state->xsave.header.xcomp_bv =
+                       host_xcr0 | XSTATE_COMPACTION_ENABLED;
+       /*
+@@ -7342,7 +7346,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+        */
+       vcpu->guest_fpu_loaded = 1;
+       __kernel_fpu_begin();
+-      __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
++      __copy_kernel_to_fpregs(vcpu->arch.guest_fpu.state);
+       trace_kvm_fpu(1);
+ }
+@@ -7640,6 +7644,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+ struct static_key kvm_no_apic_vcpu __read_mostly;
+ EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
++extern struct kmem_cache *fpregs_state_cachep;
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+       struct page *page;
+@@ -7657,11 +7663,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+       else
+               vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+-      page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+-      if (!page) {
+-              r = -ENOMEM;
++      r = -ENOMEM;
++      vcpu->arch.guest_fpu.state = kmem_cache_alloc(fpregs_state_cachep, GFP_KERNEL);
++      if (!vcpu->arch.guest_fpu.state)
+               goto fail;
+-      }
++
++      page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++      if (!page)
++              goto fail_free_fpregs;
+       vcpu->arch.pio_data = page_address(page);
+       kvm_set_tsc_khz(vcpu, max_tsc_khz);
+@@ -7719,6 +7728,9 @@ fail_mmu_destroy:
+       kvm_mmu_destroy(vcpu);
+ fail_free_pio_data:
+       free_page((unsigned long)vcpu->arch.pio_data);
++fail_free_fpregs:
++      kmem_cache_free(fpregs_state_cachep, vcpu->arch.guest_fpu.state);
++      vcpu->arch.guest_fpu.state = NULL;
+ fail:
+       return r;
+ }
+@@ -7737,6 +7749,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+       free_page((unsigned long)vcpu->arch.pio_data);
+       if (!lapic_in_kernel(vcpu))
+               static_key_slow_dec(&kvm_no_apic_vcpu);
++      kmem_cache_free(fpregs_state_cachep, vcpu->arch.guest_fpu.state);
++      vcpu->arch.guest_fpu.state = NULL;
+ }
+ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index 25da5bc8..3c3fbd4 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1329,9 +1329,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+  * Rebooting also tells the Host we're finished, but the RESTART flag tells the
+  * Launcher to reboot us.
+  */
+-static void lguest_restart(char *reason)
++static __noreturn void lguest_restart(char *reason)
+ {
+       hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
++      BUG();
+ }
+ /*G:050
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 34a7413..499d0da 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -29,6 +29,10 @@ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+ obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
+ ifeq ($(CONFIG_X86_32),y)
++        CFLAGS_strstr_32.o += $(INITIFY_DISABLE_VERIFY_NOCAPTURE_FUNCTIONS)
++        CFLAGS_string_32.o += $(INITIFY_DISABLE_VERIFY_NOCAPTURE_FUNCTIONS)
++        CFLAGS_memcpy_32.o += $(INITIFY_DISABLE_VERIFY_NOCAPTURE_FUNCTIONS)
++
+         obj-y += atomic64_32.o
+         lib-y += atomic64_cx8_32.o
+         lib-y += checksum_32.o
+diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
+index 9b0ca8f..bf83b2c 100644
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -10,6 +10,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/irq_vectors.h>
+ #include <asm/alternative-asm.h>
+ /* if you want SMP support, implement these with real spinlocks */
+@@ -45,6 +46,10 @@ BEGIN(read)
+       movl  (v), %eax
+       movl 4(v), %edx
+ RET_ENDP
++BEGIN(read_unchecked)
++      movl  (v), %eax
++      movl 4(v), %edx
++RET_ENDP
+ #undef v
+ #define v %esi
+@@ -52,6 +57,10 @@ BEGIN(set)
+       movl %ebx,  (v)
+       movl %ecx, 4(v)
+ RET_ENDP
++BEGIN(set_unchecked)
++      movl %ebx,  (v)
++      movl %ecx, 4(v)
++RET_ENDP
+ #undef v
+ #define v  %esi
+@@ -67,6 +76,12 @@ RET_ENDP
+ BEGIN(add)
+       addl %eax,  (v)
+       adcl %edx, 4(v)
++
++      PAX_REFCOUNT64_OVERFLOW (v)
++RET_ENDP
++BEGIN(add_unchecked)
++      addl %eax,  (v)
++      adcl %edx, 4(v)
+ RET_ENDP
+ #undef v
+@@ -74,6 +89,15 @@ RET_ENDP
+ BEGIN(add_return)
+       addl  (v), %eax
+       adcl 4(v), %edx
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++      PAX_REFCOUNT64_OVERFLOW (v)
++RET_ENDP
++BEGIN(add_return_unchecked)
++      addl  (v), %eax
++      adcl 4(v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -83,6 +107,12 @@ RET_ENDP
+ BEGIN(sub)
+       subl %eax,  (v)
+       sbbl %edx, 4(v)
++
++      PAX_REFCOUNT64_UNDERFLOW (v)
++RET_ENDP
++BEGIN(sub_unchecked)
++      subl %eax,  (v)
++      sbbl %edx, 4(v)
+ RET_ENDP
+ #undef v
+@@ -93,6 +123,18 @@ BEGIN(sub_return)
+       sbbl $0, %edx
+       addl  (v), %eax
+       adcl 4(v), %edx
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++      PAX_REFCOUNT64_UNDERFLOW (v)
++RET_ENDP
++BEGIN(sub_return_unchecked)
++      negl %edx
++      negl %eax
++      sbbl $0, %edx
++      addl  (v), %eax
++      adcl 4(v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -102,6 +144,12 @@ RET_ENDP
+ BEGIN(inc)
+       addl $1,  (v)
+       adcl $0, 4(v)
++
++      PAX_REFCOUNT64_OVERFLOW (v)
++RET_ENDP
++BEGIN(inc_unchecked)
++      addl $1,  (v)
++      adcl $0, 4(v)
+ RET_ENDP
+ #undef v
+@@ -111,6 +159,17 @@ BEGIN(inc_return)
+       movl 4(v), %edx
+       addl $1, %eax
+       adcl $0, %edx
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++      PAX_REFCOUNT64_OVERFLOW (v)
++RET_ENDP
++BEGIN(inc_return_unchecked)
++      movl  (v), %eax
++      movl 4(v), %edx
++      addl $1, %eax
++      adcl $0, %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -120,6 +179,12 @@ RET_ENDP
+ BEGIN(dec)
+       subl $1,  (v)
+       sbbl $0, 4(v)
++
++      PAX_REFCOUNT64_UNDERFLOW (v)
++RET_ENDP
++BEGIN(dec_unchecked)
++      subl $1,  (v)
++      sbbl $0, 4(v)
+ RET_ENDP
+ #undef v
+@@ -129,6 +194,17 @@ BEGIN(dec_return)
+       movl 4(v), %edx
+       subl $1, %eax
+       sbbl $0, %edx
++
++      movl %eax,  (v)
++      movl %edx, 4(v)
++
++      PAX_REFCOUNT64_UNDERFLOW (v)
++RET_ENDP
++BEGIN(dec_return_unchecked)
++      movl  (v), %eax
++      movl 4(v), %edx
++      subl $1, %eax
++      sbbl $0, %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+ RET_ENDP
+@@ -140,6 +216,9 @@ BEGIN(add_unless)
+       adcl %edx, %edi
+       addl  (v), %eax
+       adcl 4(v), %edx
++
++      PAX_REFCOUNT64_OVERFLOW (v)
++
+       cmpl %eax, %ecx
+       je 3f
+ 1:
+@@ -165,6 +244,9 @@ BEGIN(inc_not_zero)
+ 1:
+       addl $1, %eax
+       adcl $0, %edx
++
++      PAX_REFCOUNT64_OVERFLOW (v)
++
+       movl %eax,  (v)
+       movl %edx, 4(v)
+       movl $1, %eax
+@@ -183,6 +265,9 @@ BEGIN(dec_if_positive)
+       movl 4(v), %edx
+       subl $1, %eax
+       sbbl $0, %edx
++
++      PAX_REFCOUNT64_UNDERFLOW (v)
++
+       js 1f
+       movl %eax,  (v)
+       movl %edx, 4(v)
+diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
+index db3ae854..3852140 100644
+--- a/arch/x86/lib/atomic64_cx8_32.S
++++ b/arch/x86/lib/atomic64_cx8_32.S
+@@ -10,6 +10,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/irq_vectors.h>
+ #include <asm/alternative-asm.h>
+ .macro read64 reg
+@@ -22,9 +23,16 @@
+ ENTRY(atomic64_read_cx8)
+       read64 %ecx
++      pax_force_retaddr
+       ret
+ ENDPROC(atomic64_read_cx8)
++ENTRY(atomic64_read_unchecked_cx8)
++      read64 %ecx
++      pax_force_retaddr
++      ret
++ENDPROC(atomic64_read_unchecked_cx8)
++
+ ENTRY(atomic64_set_cx8)
+ 1:
+ /* we don't need LOCK_PREFIX since aligned 64-bit writes
+@@ -32,20 +40,33 @@ ENTRY(atomic64_set_cx8)
+       cmpxchg8b (%esi)
+       jne 1b
++      pax_force_retaddr
+       ret
+ ENDPROC(atomic64_set_cx8)
++ENTRY(atomic64_set_unchecked_cx8)
++1:
++/* we don't need LOCK_PREFIX since aligned 64-bit writes
++ * are atomic on 586 and newer */
++      cmpxchg8b (%esi)
++      jne 1b
++
++      pax_force_retaddr
++      ret
++ENDPROC(atomic64_set_unchecked_cx8)
++
+ ENTRY(atomic64_xchg_cx8)
+ 1:
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
++      pax_force_retaddr
+       ret
+ ENDPROC(atomic64_xchg_cx8)
+-.macro addsub_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro addsub_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+       pushl %ebp
+       pushl %ebx
+       pushl %esi
+@@ -61,26 +82,37 @@ ENTRY(atomic64_\func\()_return_cx8)
+       movl %edx, %ecx
+       \ins\()l %esi, %ebx
+       \insc\()l %edi, %ecx
++
++.ifb \unchecked
++.if \func == add
++      PAX_REFCOUNT64_OVERFLOW (%ebp)
++.else
++      PAX_REFCOUNT64_UNDERFLOW (%ebp)
++.endif
++.endif
++
+       LOCK_PREFIX
+       cmpxchg8b (%ebp)
+       jne 1b
+-
+-10:
+       movl %ebx, %eax
+       movl %ecx, %edx
++
+       popl %edi
+       popl %esi
+       popl %ebx
+       popl %ebp
++      pax_force_retaddr
+       ret
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+ addsub_return add add adc
+ addsub_return sub sub sbb
++addsub_return add add adc _unchecked
++addsub_return sub sub sbb _unchecked
+-.macro incdec_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro incdec_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+       pushl %ebx
+       read64 %esi
+@@ -89,20 +121,31 @@ ENTRY(atomic64_\func\()_return_cx8)
+       movl %edx, %ecx
+       \ins\()l $1, %ebx
+       \insc\()l $0, %ecx
++
++.ifb \unchecked
++.if \func == inc
++      PAX_REFCOUNT64_OVERFLOW (%esi)
++.else
++      PAX_REFCOUNT64_UNDERFLOW (%esi)
++.endif
++.endif
++
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
+-
+-10:
+       movl %ebx, %eax
+       movl %ecx, %edx
++
+       popl %ebx
++      pax_force_retaddr
+       ret
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+ incdec_return inc add adc
+ incdec_return dec sub sbb
++incdec_return inc add adc _unchecked
++incdec_return dec sub sbb _unchecked
+ ENTRY(atomic64_dec_if_positive_cx8)
+       pushl %ebx
+@@ -113,6 +156,9 @@ ENTRY(atomic64_dec_if_positive_cx8)
+       movl %edx, %ecx
+       subl $1, %ebx
+       sbb $0, %ecx
++
++      PAX_REFCOUNT64_UNDERFLOW (%esi)
++
+       js 2f
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+@@ -122,6 +168,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
+       movl %ebx, %eax
+       movl %ecx, %edx
+       popl %ebx
++      pax_force_retaddr
+       ret
+ ENDPROC(atomic64_dec_if_positive_cx8)
+@@ -144,6 +191,9 @@ ENTRY(atomic64_add_unless_cx8)
+       movl %edx, %ecx
+       addl %ebp, %ebx
+       adcl %edi, %ecx
++
++      PAX_REFCOUNT64_OVERFLOW (%esi)
++
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
+@@ -153,6 +203,7 @@ ENTRY(atomic64_add_unless_cx8)
+       addl $8, %esp
+       popl %ebx
+       popl %ebp
++      pax_force_retaddr
+       ret
+ 4:
+       cmpl %edx, 4(%esp)
+@@ -173,6 +224,9 @@ ENTRY(atomic64_inc_not_zero_cx8)
+       xorl %ecx, %ecx
+       addl $1, %ebx
+       adcl %edx, %ecx
++
++      PAX_REFCOUNT64_OVERFLOW (%esi)
++
+       LOCK_PREFIX
+       cmpxchg8b (%esi)
+       jne 1b
+@@ -180,5 +234,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
+       movl $1, %eax
+ 3:
+       popl %ebx
++      pax_force_retaddr
+       ret
+ ENDPROC(atomic64_inc_not_zero_cx8)
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index c1e6232..ebbeba7 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-                              
++#include <asm/segment.h>
++
+ /*
+  * computes a partial checksum, e.g. for TCP/UDP fragments
+  */
+@@ -280,7 +281,22 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
+ #define ARGBASE 16            
+ #define FP            12
+-              
++
++ENTRY(csum_partial_copy_generic_to_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl %gs
++      popl %es
++      jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl %gs
++      popl %ds
++#endif
++
+ ENTRY(csum_partial_copy_generic)
+       subl  $4,%esp   
+       pushl %edi
+@@ -299,7 +315,7 @@ ENTRY(csum_partial_copy_generic)
+       jmp 4f
+ SRC(1:        movw (%esi), %bx        )
+       addl $2, %esi
+-DST(  movw %bx, (%edi)        )
++DST(  movw %bx, %es:(%edi)    )
+       addl $2, %edi
+       addw %bx, %ax   
+       adcl $0, %eax
+@@ -311,30 +327,30 @@ DST(     movw %bx, (%edi)        )
+ SRC(1:        movl (%esi), %ebx       )
+ SRC(  movl 4(%esi), %edx      )
+       adcl %ebx, %eax
+-DST(  movl %ebx, (%edi)       )
++DST(  movl %ebx, %es:(%edi)   )
+       adcl %edx, %eax
+-DST(  movl %edx, 4(%edi)      )
++DST(  movl %edx, %es:4(%edi)  )
+ SRC(  movl 8(%esi), %ebx      )
+ SRC(  movl 12(%esi), %edx     )
+       adcl %ebx, %eax
+-DST(  movl %ebx, 8(%edi)      )
++DST(  movl %ebx, %es:8(%edi)  )
+       adcl %edx, %eax
+-DST(  movl %edx, 12(%edi)     )
++DST(  movl %edx, %es:12(%edi) )
+ SRC(  movl 16(%esi), %ebx     )
+ SRC(  movl 20(%esi), %edx     )
+       adcl %ebx, %eax
+-DST(  movl %ebx, 16(%edi)     )
++DST(  movl %ebx, %es:16(%edi) )
+       adcl %edx, %eax
+-DST(  movl %edx, 20(%edi)     )
++DST(  movl %edx, %es:20(%edi) )
+ SRC(  movl 24(%esi), %ebx     )
+ SRC(  movl 28(%esi), %edx     )
+       adcl %ebx, %eax
+-DST(  movl %ebx, 24(%edi)     )
++DST(  movl %ebx, %es:24(%edi) )
+       adcl %edx, %eax
+-DST(  movl %edx, 28(%edi)     )
++DST(  movl %edx, %es:28(%edi) )
+       lea 32(%esi), %esi
+       lea 32(%edi), %edi
+@@ -348,7 +364,7 @@ DST(       movl %edx, 28(%edi)     )
+       shrl $2, %edx                   # This clears CF
+ SRC(3:        movl (%esi), %ebx       )
+       adcl %ebx, %eax
+-DST(  movl %ebx, (%edi)       )
++DST(  movl %ebx, %es:(%edi)   )
+       lea 4(%esi), %esi
+       lea 4(%edi), %edi
+       dec %edx
+@@ -360,12 +376,12 @@ DST(     movl %ebx, (%edi)       )
+       jb 5f
+ SRC(  movw (%esi), %cx        )
+       leal 2(%esi), %esi
+-DST(  movw %cx, (%edi)        )
++DST(  movw %cx, %es:(%edi)    )
+       leal 2(%edi), %edi
+       je 6f
+       shll $16,%ecx
+ SRC(5:        movb (%esi), %cl        )
+-DST(  movb %cl, (%edi)        )
++DST(  movb %cl, %es:(%edi)    )
+ 6:    addl %ecx, %eax
+       adcl $0, %eax
+ 7:
+@@ -376,7 +392,7 @@ DST(       movb %cl, (%edi)        )
+ 6001:
+       movl ARGBASE+20(%esp), %ebx     # src_err_ptr
+-      movl $-EFAULT, (%ebx)
++      movl $-EFAULT, %ss:(%ebx)
+       # zero the complete destination - computing the rest
+       # is too much work 
+@@ -389,34 +405,58 @@ DST(     movb %cl, (%edi)        )
+ 6002:
+       movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
+-      movl $-EFAULT,(%ebx)
++      movl $-EFAULT,%ss:(%ebx)
+       jmp 5000b
+ .previous
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl %ss
++      popl %ds
++      pushl %ss
++      popl %es
++#endif
++
+       popl %ebx
+       popl %esi
+       popl %edi
+       popl %ecx                       # equivalent to addl $4,%esp
+       ret     
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+ #else
+ /* Version for PentiumII/PPro */
+ #define ROUND1(x) \
++      nop; nop; nop;                          \
+       SRC(movl x(%esi), %ebx  )       ;       \
+       addl %ebx, %eax                 ;       \
+-      DST(movl %ebx, x(%edi)  )       ; 
++      DST(movl %ebx, %es:x(%edi))     ;
+ #define ROUND(x) \
++      nop; nop; nop;                          \
+       SRC(movl x(%esi), %ebx  )       ;       \
+       adcl %ebx, %eax                 ;       \
+-      DST(movl %ebx, x(%edi)  )       ;
++      DST(movl %ebx, %es:x(%edi))     ;
+ #define ARGBASE 12
+-              
++
++ENTRY(csum_partial_copy_generic_to_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl %gs
++      popl %es
++      jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl %gs
++      popl %ds
++#endif
++
+ ENTRY(csum_partial_copy_generic)
+       pushl %ebx
+       pushl %edi
+@@ -435,7 +475,7 @@ ENTRY(csum_partial_copy_generic)
+       subl %ebx, %edi  
+       lea  -1(%esi),%edx
+       andl $-32,%edx
+-      lea 3f(%ebx,%ebx), %ebx
++      lea 3f(%ebx,%ebx,2), %ebx
+       testl %esi, %esi 
+       jmp *%ebx
+ 1:    addl $64,%esi
+@@ -456,19 +496,19 @@ ENTRY(csum_partial_copy_generic)
+       jb 5f
+ SRC(  movw (%esi), %dx         )
+       leal 2(%esi), %esi
+-DST(  movw %dx, (%edi)         )
++DST(  movw %dx, %es:(%edi)     )
+       leal 2(%edi), %edi
+       je 6f
+       shll $16,%edx
+ 5:
+ SRC(  movb (%esi), %dl         )
+-DST(  movb %dl, (%edi)         )
++DST(  movb %dl, %es:(%edi)     )
+ 6:    addl %edx, %eax
+       adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl    ARGBASE+20(%esp), %ebx  # src_err_ptr   
+-      movl $-EFAULT, (%ebx)
++      movl $-EFAULT, %ss:(%ebx)
+       # zero the complete destination (computing the rest is too much work)
+       movl ARGBASE+8(%esp),%edi       # dst
+       movl ARGBASE+12(%esp),%ecx      # len
+@@ -476,15 +516,22 @@ DST(     movb %dl, (%edi)         )
+       rep; stosb
+       jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
+-      movl $-EFAULT, (%ebx)
++      movl $-EFAULT, %ss:(%ebx)
+       jmp  7b                 
+ .previous                             
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      pushl %ss
++      popl %ds
++      pushl %ss
++      popl %es
++#endif
++
+       popl %esi
+       popl %edi
+       popl %ebx
+       ret
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+                               
+ #undef ROUND
+ #undef ROUND1         
+diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
+index 65be7cf..d4cb4b4 100644
+--- a/arch/x86/lib/clear_page_64.S
++++ b/arch/x86/lib/clear_page_64.S
+@@ -21,6 +21,7 @@ ENTRY(clear_page)
+       movl $4096/8,%ecx
+       xorl %eax,%eax
+       rep stosq
++      pax_force_retaddr
+       ret
+ ENDPROC(clear_page)
+@@ -43,6 +44,7 @@ ENTRY(clear_page_orig)
+       leaq    64(%rdi),%rdi
+       jnz     .Lloop
+       nop
++      pax_force_retaddr
+       ret
+ ENDPROC(clear_page_orig)
+@@ -50,5 +52,6 @@ ENTRY(clear_page_c_e)
+       movl $4096,%ecx
+       xorl %eax,%eax
+       rep stosb
++      pax_force_retaddr
+       ret
+ ENDPROC(clear_page_c_e)
+diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
+index 9b33024..e52ee44 100644
+--- a/arch/x86/lib/cmpxchg16b_emu.S
++++ b/arch/x86/lib/cmpxchg16b_emu.S
+@@ -7,6 +7,7 @@
+  */
+ #include <linux/linkage.h>
+ #include <asm/percpu.h>
++#include <asm/alternative-asm.h>
+ .text
+@@ -43,11 +44,13 @@ ENTRY(this_cpu_cmpxchg16b_emu)
+       popfq
+       mov $1, %al
++      pax_force_retaddr
+       ret
+ .Lnot_same:
+       popfq
+       xor %al,%al
++      pax_force_retaddr
+       ret
+ ENDPROC(this_cpu_cmpxchg16b_emu)
+diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
+index 24ef1c2..a119ef1 100644
+--- a/arch/x86/lib/copy_page_64.S
++++ b/arch/x86/lib/copy_page_64.S
+@@ -15,13 +15,14 @@ ENTRY(copy_page)
+       ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
+       movl    $4096/8, %ecx
+       rep     movsq
++      pax_force_retaddr
+       ret
+ ENDPROC(copy_page)
+ ENTRY(copy_page_regs)
+       subq    $2*8,   %rsp
+       movq    %rbx,   (%rsp)
+-      movq    %r12,   1*8(%rsp)
++      movq    %r13,   1*8(%rsp)
+       movl    $(4096/64)-5,   %ecx
+       .p2align 4
+@@ -34,7 +35,7 @@ ENTRY(copy_page_regs)
+       movq    0x8*4(%rsi), %r9
+       movq    0x8*5(%rsi), %r10
+       movq    0x8*6(%rsi), %r11
+-      movq    0x8*7(%rsi), %r12
++      movq    0x8*7(%rsi), %r13
+       prefetcht0 5*64(%rsi)
+@@ -45,7 +46,7 @@ ENTRY(copy_page_regs)
+       movq    %r9,  0x8*4(%rdi)
+       movq    %r10, 0x8*5(%rdi)
+       movq    %r11, 0x8*6(%rdi)
+-      movq    %r12, 0x8*7(%rdi)
++      movq    %r13, 0x8*7(%rdi)
+       leaq    64 (%rsi), %rsi
+       leaq    64 (%rdi), %rdi
+@@ -64,7 +65,7 @@ ENTRY(copy_page_regs)
+       movq    0x8*4(%rsi), %r9
+       movq    0x8*5(%rsi), %r10
+       movq    0x8*6(%rsi), %r11
+-      movq    0x8*7(%rsi), %r12
++      movq    0x8*7(%rsi), %r13
+       movq    %rax, 0x8*0(%rdi)
+       movq    %rbx, 0x8*1(%rdi)
+@@ -73,14 +74,15 @@ ENTRY(copy_page_regs)
+       movq    %r9,  0x8*4(%rdi)
+       movq    %r10, 0x8*5(%rdi)
+       movq    %r11, 0x8*6(%rdi)
+-      movq    %r12, 0x8*7(%rdi)
++      movq    %r13, 0x8*7(%rdi)
+       leaq    64(%rdi), %rdi
+       leaq    64(%rsi), %rsi
+       jnz     .Loop2
+       movq    (%rsp), %rbx
+-      movq    1*8(%rsp), %r12
++      movq    1*8(%rsp), %r13
+       addq    $2*8, %rsp
++      pax_force_retaddr
+       ret
+ ENDPROC(copy_page_regs)
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index bf603eb..5271364 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -14,51 +14,34 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/pgtable.h>
++#include <asm/frame.h>
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(_copy_to_user)
+-      mov PER_CPU_VAR(current_task), %rax
+-      movq %rdi,%rcx
+-      addq %rdx,%rcx
+-      jc bad_to_user
+-      cmpq TASK_addr_limit(%rax),%rcx
+-      ja bad_to_user
+-      ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
+-                    "jmp copy_user_generic_string",           \
+-                    X86_FEATURE_REP_GOOD,                     \
+-                    "jmp copy_user_enhanced_fast_string",     \
+-                    X86_FEATURE_ERMS
+-ENDPROC(_copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(_copy_from_user)
+-      mov PER_CPU_VAR(current_task), %rax
+-      movq %rsi,%rcx
+-      addq %rdx,%rcx
+-      jc bad_from_user
+-      cmpq TASK_addr_limit(%rax),%rcx
+-      ja bad_from_user
+-      ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
+-                    "jmp copy_user_generic_string",           \
+-                    X86_FEATURE_REP_GOOD,                     \
+-                    "jmp copy_user_enhanced_fast_string",     \
+-                    X86_FEATURE_ERMS
+-ENDPROC(_copy_from_user)
+-
++.macro ALIGN_DESTINATION
++      /* check for bad alignment of destination */
++      movl %edi,%ecx
++      andl $7,%ecx
++      jz 102f                         /* already aligned */
++      subl $8,%ecx
++      negl %ecx
++      subl %ecx,%edx
++100:  movb (%rsi),%al
++101:  movb %al,(%rdi)
++      incq %rsi
++      incq %rdi
++      decl %ecx
++      jnz 100b
++102:
+       .section .fixup,"ax"
+-      /* must zero dest */
+-ENTRY(bad_from_user)
+-bad_from_user:
+-      movl %edx,%ecx
+-      xorl %eax,%eax
+-      rep
+-      stosb
+-bad_to_user:
+-      movl %edx,%eax
+-      ret
+-ENDPROC(bad_from_user)
++103:  addl %ecx,%edx                  /* ecx is zerorest also */
++      FRAME_END
++      jmp copy_user_handle_tail
+       .previous
++      _ASM_EXTABLE(100b,103b)
++      _ASM_EXTABLE(101b,103b)
++.endm
++
+ /*
+  * copy_user_generic_unrolled - memory copy with exception handling.
+  * This version is for CPUs like P4 that don't have efficient micro
+@@ -73,7 +56,8 @@ ENDPROC(bad_from_user)
+  * eax uncopied bytes or 0 if successful.
+  */
+ ENTRY(copy_user_generic_unrolled)
+-      ASM_STAC
++      FRAME_BEGIN
++      ASM_USER_ACCESS_BEGIN
+       cmpl $8,%edx
+       jb 20f          /* less then 8 bytes, go to byte copy loop */
+       ALIGN_DESTINATION
+@@ -121,7 +105,9 @@ ENTRY(copy_user_generic_unrolled)
+       decl %ecx
+       jnz 21b
+ 23:   xor %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+@@ -131,7 +117,8 @@ ENTRY(copy_user_generic_unrolled)
+ 40:   leal (%rdx,%rcx,8),%edx
+       jmp 60f
+ 50:   movl %ecx,%edx
+-60:   jmp copy_user_handle_tail /* ecx is zerorest also */
++60:   FRAME_END
++      jmp copy_user_handle_tail /* ecx is zerorest also */
+       .previous
+       _ASM_EXTABLE(1b,30b)
+@@ -175,7 +162,8 @@ ENDPROC(copy_user_generic_unrolled)
+  * eax uncopied bytes or 0 if successful.
+  */
+ ENTRY(copy_user_generic_string)
+-      ASM_STAC
++      FRAME_BEGIN
++      ASM_USER_ACCESS_BEGIN
+       cmpl $8,%edx
+       jb 2f           /* less than 8 bytes, go to byte copy loop */
+       ALIGN_DESTINATION
+@@ -188,12 +176,15 @@ ENTRY(copy_user_generic_string)
+ 3:    rep
+       movsb
+       xorl %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+ 11:   leal (%rdx,%rcx,8),%ecx
+ 12:   movl %ecx,%edx          /* ecx is zerorest also */
++      FRAME_END
+       jmp copy_user_handle_tail
+       .previous
+@@ -214,16 +205,20 @@ ENDPROC(copy_user_generic_string)
+  * eax uncopied bytes or 0 if successful.
+  */
+ ENTRY(copy_user_enhanced_fast_string)
+-      ASM_STAC
++      FRAME_BEGIN
++      ASM_USER_ACCESS_BEGIN
+       movl %edx,%ecx
+ 1:    rep
+       movsb
+       xorl %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+ 12:   movl %ecx,%edx          /* ecx is zerorest also */
++      FRAME_END
+       jmp copy_user_handle_tail
+       .previous
+@@ -240,7 +235,17 @@ ENDPROC(copy_user_enhanced_fast_string)
+  *  - Require 4-byte alignment when size is 4 bytes.
+  */
+ ENTRY(__copy_user_nocache)
+-      ASM_STAC
++      FRAME_BEGIN
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      mov pax_user_shadow_base,%rcx
++      cmp %rcx,%rsi
++      jae 1f
++      add %rcx,%rsi
++1:
++#endif
++
++      ASM_USER_ACCESS_BEGIN
+       /* If size is less than 8 bytes, go to 4-byte copy */
+       cmpl $8,%edx
+@@ -334,8 +339,10 @@ ENTRY(__copy_user_nocache)
+       /* Finished copying; fence the prior stores */
+ .L_finish_copy:
+       xorl %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
+       sfence
++      FRAME_END
++      pax_force_retaddr
+       ret
+       .section .fixup,"ax"
+@@ -353,6 +360,7 @@ ENTRY(__copy_user_nocache)
+       movl %ecx,%edx
+ .L_fixup_handle_tail:
+       sfence
++      FRAME_END
+       jmp copy_user_handle_tail
+       .previous
+diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
+index 7e48807..cc966ff 100644
+--- a/arch/x86/lib/csum-copy_64.S
++++ b/arch/x86/lib/csum-copy_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
++#include <asm/alternative-asm.h>
+ /*
+  * Checksum copy with exception handling.
+@@ -52,7 +53,7 @@ ENTRY(csum_partial_copy_generic)
+ .Lignore:
+       subq  $7*8, %rsp
+       movq  %rbx, 2*8(%rsp)
+-      movq  %r12, 3*8(%rsp)
++      movq  %r15, 3*8(%rsp)
+       movq  %r14, 4*8(%rsp)
+       movq  %r13, 5*8(%rsp)
+       movq  %rbp, 6*8(%rsp)
+@@ -64,16 +65,16 @@ ENTRY(csum_partial_copy_generic)
+       movl  %edx, %ecx
+       xorl  %r9d, %r9d
+-      movq  %rcx, %r12
++      movq  %rcx, %r15
+-      shrq  $6, %r12
++      shrq  $6, %r15
+       jz      .Lhandle_tail       /* < 64 */
+       clc
+       /* main loop. clear in 64 byte blocks */
+       /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
+-      /* r11: temp3, rdx: temp4, r12 loopcnt */
++      /* r11: temp3, rdx: temp4, r15 loopcnt */
+       /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+       .p2align 4
+ .Lloop:
+@@ -107,7 +108,7 @@ ENTRY(csum_partial_copy_generic)
+       adcq  %r14, %rax
+       adcq  %r13, %rax
+-      decl %r12d
++      decl %r15d
+       dest
+       movq %rbx, (%rsi)
+@@ -200,11 +201,12 @@ ENTRY(csum_partial_copy_generic)
+ .Lende:
+       movq 2*8(%rsp), %rbx
+-      movq 3*8(%rsp), %r12
++      movq 3*8(%rsp), %r15
+       movq 4*8(%rsp), %r14
+       movq 5*8(%rsp), %r13
+       movq 6*8(%rsp), %rbp
+       addq $7*8, %rsp
++      pax_force_retaddr
+       ret
+       /* Exception handlers. Very simple, zeroing is done in the wrappers */
+diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
+index 8bd5358..a6c9102 100644
+--- a/arch/x86/lib/csum-wrappers_64.c
++++ b/arch/x86/lib/csum-wrappers_64.c
+@@ -53,10 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
+                       len -= 2;
+               }
+       }
+-      stac();
+-      isum = csum_partial_copy_generic((__force const void *)src,
++      user_access_begin();
++      isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
+                               dst, len, isum, errp, NULL);
+-      clac();
++      user_access_end();
+       if (unlikely(*errp))
+               goto out_err;
+@@ -110,10 +110,10 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
+       }
+       *errp = 0;
+-      stac();
+-      ret = csum_partial_copy_generic(src, (void __force *)dst,
++      user_access_begin();
++      ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
+                                       len, isum, NULL, errp);
+-      clac();
++      user_access_end();
+       return ret;
+ }
+ EXPORT_SYMBOL(csum_partial_copy_to_user);
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index 0ef5128..4a52ddc 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -32,56 +32,127 @@
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/frame.h>
++#include <asm/alternative-asm.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
+       .text
+ ENTRY(__get_user_1)
++      FRAME_BEGIN
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+-      ASM_STAC
+-1:    movzbl (%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++1:    __copyuser_seg movzbl (%_ASM_AX),%edx
+       xor %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(__get_user_1)
+ ENTRY(__get_user_2)
++      FRAME_BEGIN
+       add $1,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       jc bad_get_user
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+-      ASM_STAC
+-2:    movzwl -1(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++2:    __copyuser_seg movzwl -1(%_ASM_AX),%edx
+       xor %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(__get_user_2)
+ ENTRY(__get_user_4)
++      FRAME_BEGIN
+       add $3,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       jc bad_get_user
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+-      ASM_STAC
+-3:    movl -3(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++3:    __copyuser_seg movl -3(%_ASM_AX),%edx
+       xor %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(__get_user_4)
+ ENTRY(__get_user_8)
++      FRAME_BEGIN
++
+ #ifdef CONFIG_X86_64
+       add $7,%_ASM_AX
+       jc bad_get_user
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user
+-      ASM_STAC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++      mov pax_user_shadow_base,%_ASM_DX
++      cmp %_ASM_DX,%_ASM_AX
++      jae 1234f
++      add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++      ASM_USER_ACCESS_BEGIN
+ 4:    movq -7(%_ASM_AX),%rdx
+       xor %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ #else
+       add $7,%_ASM_AX
+@@ -89,11 +160,13 @@ ENTRY(__get_user_8)
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user_8
+-      ASM_STAC
+-4:    movl -7(%_ASM_AX),%edx
+-5:    movl -3(%_ASM_AX),%ecx
++      ASM_USER_ACCESS_BEGIN
++4:    __copyuser_seg movl -7(%_ASM_AX),%edx
++5:    __copyuser_seg movl -3(%_ASM_AX),%ecx
+       xor %eax,%eax
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ #endif
+ ENDPROC(__get_user_8)
+@@ -102,7 +175,9 @@ ENDPROC(__get_user_8)
+ bad_get_user:
+       xor %edx,%edx
+       mov $(-EFAULT),%_ASM_AX
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ END(bad_get_user)
+@@ -111,7 +186,9 @@ bad_get_user_8:
+       xor %edx,%edx
+       xor %ecx,%ecx
+       mov $(-EFAULT),%_ASM_AX
+-      ASM_CLAC
++      ASM_USER_ACCESS_END
++      FRAME_END
++      pax_force_retaddr
+       ret
+ END(bad_get_user_8)
+ #endif
+diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
+index 1088eb8..fac8468 100644
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -20,8 +20,10 @@
+ #ifdef __KERNEL__
+ #include <linux/string.h>
++#include <asm/pgtable_types.h>
+ #else
+ #include <string.h>
++#define ktla_ktva(addr) addr
+ #endif
+ #include <asm/inat.h>
+ #include <asm/insn.h>
+@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+               buf_len = MAX_INSN_SIZE;
+       memset(insn, 0, sizeof(*insn));
+-      insn->kaddr = kaddr;
+-      insn->end_kaddr = kaddr + buf_len;
+-      insn->next_byte = kaddr;
++      insn->kaddr = (void *)ktla_ktva((unsigned long)kaddr);
++      insn->end_kaddr = insn->kaddr + buf_len;
++      insn->next_byte = insn->kaddr;
+       insn->x86_64 = x86_64 ? 1 : 0;
+       insn->opnd_bytes = 4;
+       if (x86_64)
+diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
+index 33147fe..12a8815 100644
+--- a/arch/x86/lib/iomap_copy_64.S
++++ b/arch/x86/lib/iomap_copy_64.S
+@@ -16,6 +16,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ /*
+  * override generic version in lib/iomap_copy.c
+@@ -23,5 +24,6 @@
+ ENTRY(__iowrite32_copy)
+       movl %edx,%ecx
+       rep movsd
++      pax_force_retaddr
+       ret
+ ENDPROC(__iowrite32_copy)
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 2ec0b0abb..3e02ccd 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -37,6 +37,7 @@ ENTRY(memcpy)
+       rep movsq
+       movl %edx, %ecx
+       rep movsb
++      pax_force_retaddr
+       ret
+ ENDPROC(memcpy)
+ ENDPROC(__memcpy)
+@@ -49,6 +50,7 @@ ENTRY(memcpy_erms)
+       movq %rdi, %rax
+       movq %rdx, %rcx
+       rep movsb
++      pax_force_retaddr
+       ret
+ ENDPROC(memcpy_erms)
+@@ -133,6 +135,7 @@ ENTRY(memcpy_orig)
+       movq %r9,       1*8(%rdi)
+       movq %r10,      -2*8(%rdi, %rdx)
+       movq %r11,      -1*8(%rdi, %rdx)
++      pax_force_retaddr
+       retq
+       .p2align 4
+ .Lless_16bytes:
+@@ -145,6 +148,7 @@ ENTRY(memcpy_orig)
+       movq -1*8(%rsi, %rdx),  %r9
+       movq %r8,       0*8(%rdi)
+       movq %r9,       -1*8(%rdi, %rdx)
++      pax_force_retaddr
+       retq
+       .p2align 4
+ .Lless_8bytes:
+@@ -158,6 +162,7 @@ ENTRY(memcpy_orig)
+       movl -4(%rsi, %rdx), %r8d
+       movl %ecx, (%rdi)
+       movl %r8d, -4(%rdi, %rdx)
++      pax_force_retaddr
+       retq
+       .p2align 4
+ .Lless_3bytes:
+@@ -176,6 +181,7 @@ ENTRY(memcpy_orig)
+       movb %cl, (%rdi)
+ .Lend:
++      pax_force_retaddr
+       retq
+ ENDPROC(memcpy_orig)
+diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
+index 90ce01b..8817b34 100644
+--- a/arch/x86/lib/memmove_64.S
++++ b/arch/x86/lib/memmove_64.S
+@@ -41,7 +41,7 @@ ENTRY(__memmove)
+       jg 2f
+ .Lmemmove_begin_forward:
+-      ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
++      ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; pax_force_retaddr; retq", X86_FEATURE_ERMS
+       /*
+        * movsq instruction have many startup latency
+@@ -204,6 +204,7 @@ ENTRY(__memmove)
+       movb (%rsi), %r11b
+       movb %r11b, (%rdi)
+ 13:
++      pax_force_retaddr
+       retq
+ ENDPROC(__memmove)
+ ENDPROC(memmove)
+diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
+index e1229ec..2ca5a7a 100644
+--- a/arch/x86/lib/memset_64.S
++++ b/arch/x86/lib/memset_64.S
+@@ -40,6 +40,7 @@ ENTRY(__memset)
+       movl %edx,%ecx
+       rep stosb
+       movq %r9,%rax
++      pax_force_retaddr
+       ret
+ ENDPROC(memset)
+ ENDPROC(__memset)
+@@ -61,6 +62,7 @@ ENTRY(memset_erms)
+       movq %rdx,%rcx
+       rep stosb
+       movq %r9,%rax
++      pax_force_retaddr
+       ret
+ ENDPROC(memset_erms)
+@@ -123,6 +125,7 @@ ENTRY(memset_orig)
+ .Lende:
+       movq    %r10,%rax
++      pax_force_retaddr
+       ret
+ .Lbad_alignment:
+diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
+index c2311a6..3b01ad9 100644
+--- a/arch/x86/lib/mmx_32.c
++++ b/arch/x86/lib/mmx_32.c
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+ {
+       void *p;
+       int i;
++      unsigned long cr0;
+       if (unlikely(in_interrupt()))
+               return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+       kernel_fpu_begin();
+       __asm__ __volatile__ (
+-              "1: prefetch (%0)\n"            /* This set is 28 bytes */
+-              "   prefetch 64(%0)\n"
+-              "   prefetch 128(%0)\n"
+-              "   prefetch 192(%0)\n"
+-              "   prefetch 256(%0)\n"
++              "1: prefetch (%1)\n"            /* This set is 28 bytes */
++              "   prefetch 64(%1)\n"
++              "   prefetch 128(%1)\n"
++              "   prefetch 192(%1)\n"
++              "   prefetch 256(%1)\n"
+               "2:  \n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++              "3:  \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+                       _ASM_EXTABLE(1b, 3b)
+-                      : : "r" (from));
++                      : "=&r" (cr0) : "r" (from) : "ax");
+       for ( ; i > 5; i--) {
+               __asm__ __volatile__ (
+-              "1:  prefetch 320(%0)\n"
+-              "2:  movq (%0), %%mm0\n"
+-              "  movq 8(%0), %%mm1\n"
+-              "  movq 16(%0), %%mm2\n"
+-              "  movq 24(%0), %%mm3\n"
+-              "  movq %%mm0, (%1)\n"
+-              "  movq %%mm1, 8(%1)\n"
+-              "  movq %%mm2, 16(%1)\n"
+-              "  movq %%mm3, 24(%1)\n"
+-              "  movq 32(%0), %%mm0\n"
+-              "  movq 40(%0), %%mm1\n"
+-              "  movq 48(%0), %%mm2\n"
+-              "  movq 56(%0), %%mm3\n"
+-              "  movq %%mm0, 32(%1)\n"
+-              "  movq %%mm1, 40(%1)\n"
+-              "  movq %%mm2, 48(%1)\n"
+-              "  movq %%mm3, 56(%1)\n"
++              "1:  prefetch 320(%1)\n"
++              "2:  movq (%1), %%mm0\n"
++              "  movq 8(%1), %%mm1\n"
++              "  movq 16(%1), %%mm2\n"
++              "  movq 24(%1), %%mm3\n"
++              "  movq %%mm0, (%2)\n"
++              "  movq %%mm1, 8(%2)\n"
++              "  movq %%mm2, 16(%2)\n"
++              "  movq %%mm3, 24(%2)\n"
++              "  movq 32(%1), %%mm0\n"
++              "  movq 40(%1), %%mm1\n"
++              "  movq 48(%1), %%mm2\n"
++              "  movq 56(%1), %%mm3\n"
++              "  movq %%mm0, 32(%2)\n"
++              "  movq %%mm1, 40(%2)\n"
++              "  movq %%mm2, 48(%2)\n"
++              "  movq %%mm3, 56(%2)\n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++              "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+                       _ASM_EXTABLE(1b, 3b)
+-                      : : "r" (from), "r" (to) : "memory");
++                      : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+               from += 64;
+               to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+       int i;
++      unsigned long cr0;
+       kernel_fpu_begin();
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
+        * but that is for later. -AV
+        */
+       __asm__ __volatile__(
+-              "1: prefetch (%0)\n"
+-              "   prefetch 64(%0)\n"
+-              "   prefetch 128(%0)\n"
+-              "   prefetch 192(%0)\n"
+-              "   prefetch 256(%0)\n"
++              "1: prefetch (%1)\n"
++              "   prefetch 64(%1)\n"
++              "   prefetch 128(%1)\n"
++              "   prefetch 192(%1)\n"
++              "   prefetch 256(%1)\n"
+               "2:  \n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++              "3:  \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+-                      _ASM_EXTABLE(1b, 3b) : : "r" (from));
++                      _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+       for (i = 0; i < (4096-320)/64; i++) {
+               __asm__ __volatile__ (
+-              "1: prefetch 320(%0)\n"
+-              "2: movq (%0), %%mm0\n"
+-              "   movntq %%mm0, (%1)\n"
+-              "   movq 8(%0), %%mm1\n"
+-              "   movntq %%mm1, 8(%1)\n"
+-              "   movq 16(%0), %%mm2\n"
+-              "   movntq %%mm2, 16(%1)\n"
+-              "   movq 24(%0), %%mm3\n"
+-              "   movntq %%mm3, 24(%1)\n"
+-              "   movq 32(%0), %%mm4\n"
+-              "   movntq %%mm4, 32(%1)\n"
+-              "   movq 40(%0), %%mm5\n"
+-              "   movntq %%mm5, 40(%1)\n"
+-              "   movq 48(%0), %%mm6\n"
+-              "   movntq %%mm6, 48(%1)\n"
+-              "   movq 56(%0), %%mm7\n"
+-              "   movntq %%mm7, 56(%1)\n"
++              "1: prefetch 320(%1)\n"
++              "2: movq (%1), %%mm0\n"
++              "   movntq %%mm0, (%2)\n"
++              "   movq 8(%1), %%mm1\n"
++              "   movntq %%mm1, 8(%2)\n"
++              "   movq 16(%1), %%mm2\n"
++              "   movntq %%mm2, 16(%2)\n"
++              "   movq 24(%1), %%mm3\n"
++              "   movntq %%mm3, 24(%2)\n"
++              "   movq 32(%1), %%mm4\n"
++              "   movntq %%mm4, 32(%2)\n"
++              "   movq 40(%1), %%mm5\n"
++              "   movntq %%mm5, 40(%2)\n"
++              "   movq 48(%1), %%mm6\n"
++              "   movntq %%mm6, 48(%2)\n"
++              "   movq 56(%1), %%mm7\n"
++              "   movntq %%mm7, 56(%2)\n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++              "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+-              _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++              _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+               from += 64;
+               to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+       int i;
++      unsigned long cr0;
+       kernel_fpu_begin();
+       __asm__ __volatile__ (
+-              "1: prefetch (%0)\n"
+-              "   prefetch 64(%0)\n"
+-              "   prefetch 128(%0)\n"
+-              "   prefetch 192(%0)\n"
+-              "   prefetch 256(%0)\n"
++              "1: prefetch (%1)\n"
++              "   prefetch 64(%1)\n"
++              "   prefetch 128(%1)\n"
++              "   prefetch 192(%1)\n"
++              "   prefetch 256(%1)\n"
+               "2:  \n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++              "3:  \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+-                      _ASM_EXTABLE(1b, 3b) : : "r" (from));
++                      _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+       for (i = 0; i < 4096/64; i++) {
+               __asm__ __volatile__ (
+-              "1: prefetch 320(%0)\n"
+-              "2: movq (%0), %%mm0\n"
+-              "   movq 8(%0), %%mm1\n"
+-              "   movq 16(%0), %%mm2\n"
+-              "   movq 24(%0), %%mm3\n"
+-              "   movq %%mm0, (%1)\n"
+-              "   movq %%mm1, 8(%1)\n"
+-              "   movq %%mm2, 16(%1)\n"
+-              "   movq %%mm3, 24(%1)\n"
+-              "   movq 32(%0), %%mm0\n"
+-              "   movq 40(%0), %%mm1\n"
+-              "   movq 48(%0), %%mm2\n"
+-              "   movq 56(%0), %%mm3\n"
+-              "   movq %%mm0, 32(%1)\n"
+-              "   movq %%mm1, 40(%1)\n"
+-              "   movq %%mm2, 48(%1)\n"
+-              "   movq %%mm3, 56(%1)\n"
++              "1: prefetch 320(%1)\n"
++              "2: movq (%1), %%mm0\n"
++              "   movq 8(%1), %%mm1\n"
++              "   movq 16(%1), %%mm2\n"
++              "   movq 24(%1), %%mm3\n"
++              "   movq %%mm0, (%2)\n"
++              "   movq %%mm1, 8(%2)\n"
++              "   movq %%mm2, 16(%2)\n"
++              "   movq %%mm3, 24(%2)\n"
++              "   movq 32(%1), %%mm0\n"
++              "   movq 40(%1), %%mm1\n"
++              "   movq 48(%1), %%mm2\n"
++              "   movq 56(%1), %%mm3\n"
++              "   movq %%mm0, 32(%2)\n"
++              "   movq %%mm1, 40(%2)\n"
++              "   movq %%mm2, 48(%2)\n"
++              "   movq %%mm3, 56(%2)\n"
+               ".section .fixup, \"ax\"\n"
+-              "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++              "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %%cr0, %0\n"
++              "   movl %0, %%eax\n"
++              "   andl $0xFFFEFFFF, %%eax\n"
++              "   movl %%eax, %%cr0\n"
++#endif
++
++              "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++              "   movl %0, %%cr0\n"
++#endif
++
+               "   jmp 2b\n"
+               ".previous\n"
+                       _ASM_EXTABLE(1b, 3b)
+-                      : : "r" (from), "r" (to) : "memory");
++                      : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+               from += 64;
+               to += 64;
+diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
+index c815564..303dcfa 100644
+--- a/arch/x86/lib/msr-reg.S
++++ b/arch/x86/lib/msr-reg.S
+@@ -2,6 +2,7 @@
+ #include <linux/errno.h>
+ #include <asm/asm.h>
+ #include <asm/msr.h>
++#include <asm/alternative-asm.h>
+ #ifdef CONFIG_X86_64
+ /*
+@@ -34,6 +35,7 @@ ENTRY(\op\()_safe_regs)
+       movl    %edi, 28(%r10)
+       popq %rbp
+       popq %rbx
++      pax_force_retaddr
+       ret
+ 3:
+       movl    $-EIO, %r11d
+diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
+index c891ece..27057c0 100644
+--- a/arch/x86/lib/putuser.S
++++ b/arch/x86/lib/putuser.S
+@@ -15,7 +15,10 @@
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
+-
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ /*
+  * __put_user_X
+@@ -29,55 +32,125 @@
+  * as they get called from within inline assembly.
+  */
+-#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
+-#define EXIT  ASM_CLAC ;      \
++#define ENTER FRAME_BEGIN
++#define EXIT  ASM_USER_ACCESS_END ;   \
++              FRAME_END ;             \
++              pax_force_retaddr ;     \
+               ret
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define _DEST %_ASM_CX,%_ASM_BX
++#else
++#define _DEST %_ASM_CX
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
++
+ .text
+ ENTRY(__put_user_1)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov PER_CPU_VAR(current_task), %_ASM_BX
+       cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
+       jae bad_put_user
+-      ASM_STAC
+-1:    movb %al,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++1:    __copyuser_seg movb %al,(_DEST)
+       xor %eax,%eax
+       EXIT
+ ENDPROC(__put_user_1)
+ ENTRY(__put_user_2)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov PER_CPU_VAR(current_task), %_ASM_BX
+       mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
+       sub $1,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jae bad_put_user
+-      ASM_STAC
+-2:    movw %ax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++2:    __copyuser_seg movw %ax,(_DEST)
+       xor %eax,%eax
+       EXIT
+ ENDPROC(__put_user_2)
+ ENTRY(__put_user_4)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov PER_CPU_VAR(current_task), %_ASM_BX
+       mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
+       sub $3,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jae bad_put_user
+-      ASM_STAC
+-3:    movl %eax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++3:    __copyuser_seg movl %eax,(_DEST)
+       xor %eax,%eax
+       EXIT
+ ENDPROC(__put_user_4)
+ ENTRY(__put_user_8)
+       ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov PER_CPU_VAR(current_task), %_ASM_BX
+       mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
+       sub $7,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jae bad_put_user
+-      ASM_STAC
+-4:    mov %_ASM_AX,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      mov pax_user_shadow_base,%_ASM_BX
++      cmp %_ASM_BX,%_ASM_CX
++      jb 1234f
++      xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++      ASM_USER_ACCESS_BEGIN
++4:    __copyuser_seg mov %_ASM_AX,(_DEST)
+ #ifdef CONFIG_X86_32
+-5:    movl %edx,4(%_ASM_CX)
++5:    __copyuser_seg movl %edx,4(_DEST)
+ #endif
+       xor %eax,%eax
+       EXIT
+diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
+index bf2c607..0e6d18b 100644
+--- a/arch/x86/lib/rwsem.S
++++ b/arch/x86/lib/rwsem.S
+@@ -95,6 +95,7 @@ ENTRY(call_rwsem_down_read_failed)
+       __ASM_SIZE(pop,) %__ASM_REG(dx)
+       restore_common_regs
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(call_rwsem_down_read_failed)
+@@ -105,6 +106,7 @@ ENTRY(call_rwsem_down_write_failed)
+       call rwsem_down_write_failed
+       restore_common_regs
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(call_rwsem_down_write_failed)
+@@ -128,6 +130,7 @@ ENTRY(call_rwsem_wake)
+       call rwsem_wake
+       restore_common_regs
+ 1:    FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(call_rwsem_wake)
+@@ -140,5 +143,6 @@ ENTRY(call_rwsem_downgrade_wake)
+       __ASM_SIZE(pop,) %__ASM_REG(dx)
+       restore_common_regs
+       FRAME_END
++      pax_force_retaddr
+       ret
+ ENDPROC(call_rwsem_downgrade_wake)
+diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
+index 3bc7baf..63d1a4d 100644
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -42,11 +42,13 @@ do {                                                                       \
+       int __d0;                                                       \
+       might_fault();                                                  \
+       __asm__ __volatile__(                                           \
++              __COPYUSER_SET_ES                                       \
+               ASM_STAC "\n"                                           \
+               "0:     rep; stosl\n"                                   \
+               "       movl %2,%0\n"                                   \
+               "1:     rep; stosb\n"                                   \
+               "2: " ASM_CLAC "\n"                                     \
++              __COPYUSER_RESTORE_ES                                   \
+               ".section .fixup,\"ax\"\n"                              \
+               "3:     lea 0(%2,%0,4),%0\n"                            \
+               "       jmp 2b\n"                                       \
+@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+ {
+       int d0, d1;
+       __asm__ __volatile__(
+@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+                      "       .align 2,0x90\n"
+                      "3:     movl 0(%4), %%eax\n"
+                      "4:     movl 4(%4), %%edx\n"
+-                     "5:     movl %%eax, 0(%3)\n"
+-                     "6:     movl %%edx, 4(%3)\n"
++                     "5:     "__copyuser_seg" movl %%eax, 0(%3)\n"
++                     "6:     "__copyuser_seg" movl %%edx, 4(%3)\n"
+                      "7:     movl 8(%4), %%eax\n"
+                      "8:     movl 12(%4),%%edx\n"
+-                     "9:     movl %%eax, 8(%3)\n"
+-                     "10:    movl %%edx, 12(%3)\n"
++                     "9:     "__copyuser_seg" movl %%eax, 8(%3)\n"
++                     "10:    "__copyuser_seg" movl %%edx, 12(%3)\n"
+                      "11:    movl 16(%4), %%eax\n"
+                      "12:    movl 20(%4), %%edx\n"
+-                     "13:    movl %%eax, 16(%3)\n"
+-                     "14:    movl %%edx, 20(%3)\n"
++                     "13:    "__copyuser_seg" movl %%eax, 16(%3)\n"
++                     "14:    "__copyuser_seg" movl %%edx, 20(%3)\n"
+                      "15:    movl 24(%4), %%eax\n"
+                      "16:    movl 28(%4), %%edx\n"
+-                     "17:    movl %%eax, 24(%3)\n"
+-                     "18:    movl %%edx, 28(%3)\n"
++                     "17:    "__copyuser_seg" movl %%eax, 24(%3)\n"
++                     "18:    "__copyuser_seg" movl %%edx, 28(%3)\n"
+                      "19:    movl 32(%4), %%eax\n"
+                      "20:    movl 36(%4), %%edx\n"
+-                     "21:    movl %%eax, 32(%3)\n"
+-                     "22:    movl %%edx, 36(%3)\n"
++                     "21:    "__copyuser_seg" movl %%eax, 32(%3)\n"
++                     "22:    "__copyuser_seg" movl %%edx, 36(%3)\n"
+                      "23:    movl 40(%4), %%eax\n"
+                      "24:    movl 44(%4), %%edx\n"
+-                     "25:    movl %%eax, 40(%3)\n"
+-                     "26:    movl %%edx, 44(%3)\n"
++                     "25:    "__copyuser_seg" movl %%eax, 40(%3)\n"
++                     "26:    "__copyuser_seg" movl %%edx, 44(%3)\n"
+                      "27:    movl 48(%4), %%eax\n"
+                      "28:    movl 52(%4), %%edx\n"
+-                     "29:    movl %%eax, 48(%3)\n"
+-                     "30:    movl %%edx, 52(%3)\n"
++                     "29:    "__copyuser_seg" movl %%eax, 48(%3)\n"
++                     "30:    "__copyuser_seg" movl %%edx, 52(%3)\n"
+                      "31:    movl 56(%4), %%eax\n"
+                      "32:    movl 60(%4), %%edx\n"
+-                     "33:    movl %%eax, 56(%3)\n"
+-                     "34:    movl %%edx, 60(%3)\n"
++                     "33:    "__copyuser_seg" movl %%eax, 56(%3)\n"
++                     "34:    "__copyuser_seg" movl %%edx, 60(%3)\n"
+                      "       addl $-64, %0\n"
+                      "       addl $64, %4\n"
+                      "       addl $64, %3\n"
+@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+                      "       shrl  $2, %0\n"
+                      "       andl  $3, %%eax\n"
+                      "       cld\n"
++                     __COPYUSER_SET_ES
+                      "99:    rep; movsl\n"
+                      "36:    movl %%eax, %0\n"
+                      "37:    rep; movsb\n"
+                      "100:\n"
++                     __COPYUSER_RESTORE_ES
++                     ".section .fixup,\"ax\"\n"
++                     "101:   lea 0(%%eax,%0,4),%0\n"
++                     "       jmp 100b\n"
++                     ".previous\n"
++                     _ASM_EXTABLE(1b,100b)
++                     _ASM_EXTABLE(2b,100b)
++                     _ASM_EXTABLE(3b,100b)
++                     _ASM_EXTABLE(4b,100b)
++                     _ASM_EXTABLE(5b,100b)
++                     _ASM_EXTABLE(6b,100b)
++                     _ASM_EXTABLE(7b,100b)
++                     _ASM_EXTABLE(8b,100b)
++                     _ASM_EXTABLE(9b,100b)
++                     _ASM_EXTABLE(10b,100b)
++                     _ASM_EXTABLE(11b,100b)
++                     _ASM_EXTABLE(12b,100b)
++                     _ASM_EXTABLE(13b,100b)
++                     _ASM_EXTABLE(14b,100b)
++                     _ASM_EXTABLE(15b,100b)
++                     _ASM_EXTABLE(16b,100b)
++                     _ASM_EXTABLE(17b,100b)
++                     _ASM_EXTABLE(18b,100b)
++                     _ASM_EXTABLE(19b,100b)
++                     _ASM_EXTABLE(20b,100b)
++                     _ASM_EXTABLE(21b,100b)
++                     _ASM_EXTABLE(22b,100b)
++                     _ASM_EXTABLE(23b,100b)
++                     _ASM_EXTABLE(24b,100b)
++                     _ASM_EXTABLE(25b,100b)
++                     _ASM_EXTABLE(26b,100b)
++                     _ASM_EXTABLE(27b,100b)
++                     _ASM_EXTABLE(28b,100b)
++                     _ASM_EXTABLE(29b,100b)
++                     _ASM_EXTABLE(30b,100b)
++                     _ASM_EXTABLE(31b,100b)
++                     _ASM_EXTABLE(32b,100b)
++                     _ASM_EXTABLE(33b,100b)
++                     _ASM_EXTABLE(34b,100b)
++                     _ASM_EXTABLE(35b,100b)
++                     _ASM_EXTABLE(36b,100b)
++                     _ASM_EXTABLE(37b,100b)
++                     _ASM_EXTABLE(99b,101b)
++                     : "=&c"(size), "=&D" (d0), "=&S" (d1)
++                     :  "1"(to), "2"(from), "0"(size)
++                     : "eax", "edx", "memory");
++      return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++      int d0, d1;
++      __asm__ __volatile__(
++                     "       .align 2,0x90\n"
++                     "1:     "__copyuser_seg" movl 32(%4), %%eax\n"
++                     "       cmpl $67, %0\n"
++                     "       jbe 3f\n"
++                     "2:     "__copyuser_seg" movl 64(%4), %%eax\n"
++                     "       .align 2,0x90\n"
++                     "3:     "__copyuser_seg" movl 0(%4), %%eax\n"
++                     "4:     "__copyuser_seg" movl 4(%4), %%edx\n"
++                     "5:     movl %%eax, 0(%3)\n"
++                     "6:     movl %%edx, 4(%3)\n"
++                     "7:     "__copyuser_seg" movl 8(%4), %%eax\n"
++                     "8:     "__copyuser_seg" movl 12(%4),%%edx\n"
++                     "9:     movl %%eax, 8(%3)\n"
++                     "10:    movl %%edx, 12(%3)\n"
++                     "11:    "__copyuser_seg" movl 16(%4), %%eax\n"
++                     "12:    "__copyuser_seg" movl 20(%4), %%edx\n"
++                     "13:    movl %%eax, 16(%3)\n"
++                     "14:    movl %%edx, 20(%3)\n"
++                     "15:    "__copyuser_seg" movl 24(%4), %%eax\n"
++                     "16:    "__copyuser_seg" movl 28(%4), %%edx\n"
++                     "17:    movl %%eax, 24(%3)\n"
++                     "18:    movl %%edx, 28(%3)\n"
++                     "19:    "__copyuser_seg" movl 32(%4), %%eax\n"
++                     "20:    "__copyuser_seg" movl 36(%4), %%edx\n"
++                     "21:    movl %%eax, 32(%3)\n"
++                     "22:    movl %%edx, 36(%3)\n"
++                     "23:    "__copyuser_seg" movl 40(%4), %%eax\n"
++                     "24:    "__copyuser_seg" movl 44(%4), %%edx\n"
++                     "25:    movl %%eax, 40(%3)\n"
++                     "26:    movl %%edx, 44(%3)\n"
++                     "27:    "__copyuser_seg" movl 48(%4), %%eax\n"
++                     "28:    "__copyuser_seg" movl 52(%4), %%edx\n"
++                     "29:    movl %%eax, 48(%3)\n"
++                     "30:    movl %%edx, 52(%3)\n"
++                     "31:    "__copyuser_seg" movl 56(%4), %%eax\n"
++                     "32:    "__copyuser_seg" movl 60(%4), %%edx\n"
++                     "33:    movl %%eax, 56(%3)\n"
++                     "34:    movl %%edx, 60(%3)\n"
++                     "       addl $-64, %0\n"
++                     "       addl $64, %4\n"
++                     "       addl $64, %3\n"
++                     "       cmpl $63, %0\n"
++                     "       ja  1b\n"
++                     "35:    movl  %0, %%eax\n"
++                     "       shrl  $2, %0\n"
++                     "       andl  $3, %%eax\n"
++                     "       cld\n"
++                     "99:    rep; "__copyuser_seg" movsl\n"
++                     "36:    movl %%eax, %0\n"
++                     "37:    rep; "__copyuser_seg" movsb\n"
++                     "100:\n"
+                      ".section .fixup,\"ax\"\n"
+                      "101:   lea 0(%%eax,%0,4),%0\n"
+                      "       jmp 100b\n"
+@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+       int d0, d1;
+       __asm__ __volatile__(
+                      "        .align 2,0x90\n"
+-                     "0:      movl 32(%4), %%eax\n"
++                     "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "        cmpl $67, %0\n"
+                      "        jbe 2f\n"
+-                     "1:      movl 64(%4), %%eax\n"
++                     "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
+                      "        .align 2,0x90\n"
+-                     "2:      movl 0(%4), %%eax\n"
+-                     "21:     movl 4(%4), %%edx\n"
++                     "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
++                     "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
+                      "        movl %%eax, 0(%3)\n"
+                      "        movl %%edx, 4(%3)\n"
+-                     "3:      movl 8(%4), %%eax\n"
+-                     "31:     movl 12(%4),%%edx\n"
++                     "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
++                     "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
+                      "        movl %%eax, 8(%3)\n"
+                      "        movl %%edx, 12(%3)\n"
+-                     "4:      movl 16(%4), %%eax\n"
+-                     "41:     movl 20(%4), %%edx\n"
++                     "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
++                     "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
+                      "        movl %%eax, 16(%3)\n"
+                      "        movl %%edx, 20(%3)\n"
+-                     "10:     movl 24(%4), %%eax\n"
+-                     "51:     movl 28(%4), %%edx\n"
++                     "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
++                     "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
+                      "        movl %%eax, 24(%3)\n"
+                      "        movl %%edx, 28(%3)\n"
+-                     "11:     movl 32(%4), %%eax\n"
+-                     "61:     movl 36(%4), %%edx\n"
++                     "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
++                     "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
+                      "        movl %%eax, 32(%3)\n"
+                      "        movl %%edx, 36(%3)\n"
+-                     "12:     movl 40(%4), %%eax\n"
+-                     "71:     movl 44(%4), %%edx\n"
++                     "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
++                     "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
+                      "        movl %%eax, 40(%3)\n"
+                      "        movl %%edx, 44(%3)\n"
+-                     "13:     movl 48(%4), %%eax\n"
+-                     "81:     movl 52(%4), %%edx\n"
++                     "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
++                     "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
+                      "        movl %%eax, 48(%3)\n"
+                      "        movl %%edx, 52(%3)\n"
+-                     "14:     movl 56(%4), %%eax\n"
+-                     "91:     movl 60(%4), %%edx\n"
++                     "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
++                     "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
+                      "        movl %%eax, 56(%3)\n"
+                      "        movl %%edx, 60(%3)\n"
+                      "        addl $-64, %0\n"
+@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+                      "        shrl  $2, %0\n"
+                      "        andl $3, %%eax\n"
+                      "        cld\n"
+-                     "6:      rep; movsl\n"
++                     "6:      rep; "__copyuser_seg" movsl\n"
+                      "        movl %%eax,%0\n"
+-                     "7:      rep; movsb\n"
++                     "7:      rep; "__copyuser_seg" movsb\n"
+                      "8:\n"
+                      ".section .fixup,\"ax\"\n"
+                      "9:      lea 0(%%eax,%0,4),%0\n"
+@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+       __asm__ __volatile__(
+              "        .align 2,0x90\n"
+-             "0:      movl 32(%4), %%eax\n"
++             "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
+              "        cmpl $67, %0\n"
+              "        jbe 2f\n"
+-             "1:      movl 64(%4), %%eax\n"
++             "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
+              "        .align 2,0x90\n"
+-             "2:      movl 0(%4), %%eax\n"
+-             "21:     movl 4(%4), %%edx\n"
++             "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
++             "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
+              "        movnti %%eax, 0(%3)\n"
+              "        movnti %%edx, 4(%3)\n"
+-             "3:      movl 8(%4), %%eax\n"
+-             "31:     movl 12(%4),%%edx\n"
++             "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
++             "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
+              "        movnti %%eax, 8(%3)\n"
+              "        movnti %%edx, 12(%3)\n"
+-             "4:      movl 16(%4), %%eax\n"
+-             "41:     movl 20(%4), %%edx\n"
++             "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
++             "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
+              "        movnti %%eax, 16(%3)\n"
+              "        movnti %%edx, 20(%3)\n"
+-             "10:     movl 24(%4), %%eax\n"
+-             "51:     movl 28(%4), %%edx\n"
++             "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
++             "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
+              "        movnti %%eax, 24(%3)\n"
+              "        movnti %%edx, 28(%3)\n"
+-             "11:     movl 32(%4), %%eax\n"
+-             "61:     movl 36(%4), %%edx\n"
++             "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
++             "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
+              "        movnti %%eax, 32(%3)\n"
+              "        movnti %%edx, 36(%3)\n"
+-             "12:     movl 40(%4), %%eax\n"
+-             "71:     movl 44(%4), %%edx\n"
++             "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
++             "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
+              "        movnti %%eax, 40(%3)\n"
+              "        movnti %%edx, 44(%3)\n"
+-             "13:     movl 48(%4), %%eax\n"
+-             "81:     movl 52(%4), %%edx\n"
++             "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
++             "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
+              "        movnti %%eax, 48(%3)\n"
+              "        movnti %%edx, 52(%3)\n"
+-             "14:     movl 56(%4), %%eax\n"
+-             "91:     movl 60(%4), %%edx\n"
++             "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
++             "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
+              "        movnti %%eax, 56(%3)\n"
+              "        movnti %%edx, 60(%3)\n"
+              "        addl $-64, %0\n"
+@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+              "        shrl  $2, %0\n"
+              "        andl $3, %%eax\n"
+              "        cld\n"
+-             "6:      rep; movsl\n"
++             "6:      rep; "__copyuser_seg" movsl\n"
+              "        movl %%eax,%0\n"
+-             "7:      rep; movsb\n"
++             "7:      rep; "__copyuser_seg" movsb\n"
+              "8:\n"
+              ".section .fixup,\"ax\"\n"
+              "9:      lea 0(%%eax,%0,4),%0\n"
+@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
+       __asm__ __volatile__(
+              "        .align 2,0x90\n"
+-             "0:      movl 32(%4), %%eax\n"
++             "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
+              "        cmpl $67, %0\n"
+              "        jbe 2f\n"
+-             "1:      movl 64(%4), %%eax\n"
++             "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
+              "        .align 2,0x90\n"
+-             "2:      movl 0(%4), %%eax\n"
+-             "21:     movl 4(%4), %%edx\n"
++             "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
++             "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
+              "        movnti %%eax, 0(%3)\n"
+              "        movnti %%edx, 4(%3)\n"
+-             "3:      movl 8(%4), %%eax\n"
+-             "31:     movl 12(%4),%%edx\n"
++             "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
++             "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
+              "        movnti %%eax, 8(%3)\n"
+              "        movnti %%edx, 12(%3)\n"
+-             "4:      movl 16(%4), %%eax\n"
+-             "41:     movl 20(%4), %%edx\n"
++             "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
++             "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
+              "        movnti %%eax, 16(%3)\n"
+              "        movnti %%edx, 20(%3)\n"
+-             "10:     movl 24(%4), %%eax\n"
+-             "51:     movl 28(%4), %%edx\n"
++             "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
++             "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
+              "        movnti %%eax, 24(%3)\n"
+              "        movnti %%edx, 28(%3)\n"
+-             "11:     movl 32(%4), %%eax\n"
+-             "61:     movl 36(%4), %%edx\n"
++             "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
++             "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
+              "        movnti %%eax, 32(%3)\n"
+              "        movnti %%edx, 36(%3)\n"
+-             "12:     movl 40(%4), %%eax\n"
+-             "71:     movl 44(%4), %%edx\n"
++             "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
++             "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
+              "        movnti %%eax, 40(%3)\n"
+              "        movnti %%edx, 44(%3)\n"
+-             "13:     movl 48(%4), %%eax\n"
+-             "81:     movl 52(%4), %%edx\n"
++             "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
++             "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
+              "        movnti %%eax, 48(%3)\n"
+              "        movnti %%edx, 52(%3)\n"
+-             "14:     movl 56(%4), %%eax\n"
+-             "91:     movl 60(%4), %%edx\n"
++             "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
++             "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
+              "        movnti %%eax, 56(%3)\n"
+              "        movnti %%edx, 60(%3)\n"
+              "        addl $-64, %0\n"
+@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
+              "        shrl  $2, %0\n"
+              "        andl $3, %%eax\n"
+              "        cld\n"
+-             "6:      rep; movsl\n"
++             "6:      rep; "__copyuser_seg" movsl\n"
+              "        movl %%eax,%0\n"
+-             "7:      rep; movsb\n"
++             "7:      rep; "__copyuser_seg" movsb\n"
+              "8:\n"
+              ".section .fixup,\"ax\"\n"
+              "9:      lea 0(%%eax,%0,4),%0\n"
+@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+  */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+                                       unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++                                      unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+                                       unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+                               const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+ /* Generic arbitrary sized copy.  */
+-#define __copy_user(to, from, size)                                   \
++#define __copy_user(to, from, size, prefix, set, restore)             \
+ do {                                                                  \
+       int __d0, __d1, __d2;                                           \
+       __asm__ __volatile__(                                           \
++              set                                                     \
+               "       cmp  $7,%0\n"                                   \
+               "       jbe  1f\n"                                      \
+               "       movl %1,%0\n"                                   \
+               "       negl %0\n"                                      \
+               "       andl $7,%0\n"                                   \
+               "       subl %0,%3\n"                                   \
+-              "4:     rep; movsb\n"                                   \
++              "4:     rep; "prefix"movsb\n"                           \
+               "       movl %3,%0\n"                                   \
+               "       shrl $2,%0\n"                                   \
+               "       andl $3,%3\n"                                   \
+               "       .align 2,0x90\n"                                \
+-              "0:     rep; movsl\n"                                   \
++              "0:     rep; "prefix"movsl\n"                           \
+               "       movl %3,%0\n"                                   \
+-              "1:     rep; movsb\n"                                   \
++              "1:     rep; "prefix"movsb\n"                           \
+               "2:\n"                                                  \
++              restore                                                 \
+               ".section .fixup,\"ax\"\n"                              \
+               "5:     addl %3,%0\n"                                   \
+               "       jmp 2b\n"                                       \
+@@ -538,14 +650,14 @@ do {                                                                     \
+               "       negl %0\n"                                      \
+               "       andl $7,%0\n"                                   \
+               "       subl %0,%3\n"                                   \
+-              "4:     rep; movsb\n"                                   \
++              "4:     rep; "__copyuser_seg"movsb\n"                   \
+               "       movl %3,%0\n"                                   \
+               "       shrl $2,%0\n"                                   \
+               "       andl $3,%3\n"                                   \
+               "       .align 2,0x90\n"                                \
+-              "0:     rep; movsl\n"                                   \
++              "0:     rep; "__copyuser_seg"movsl\n"                   \
+               "       movl %3,%0\n"                                   \
+-              "1:     rep; movsb\n"                                   \
++              "1:     rep; "__copyuser_seg"movsb\n"                   \
+               "2:\n"                                                  \
+               ".section .fixup,\"ax\"\n"                              \
+               "5:     addl %3,%0\n"                                   \
+@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
+ {
+       stac();
+       if (movsl_is_ok(to, from, n))
+-              __copy_user(to, from, n);
++              __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
+       else
+-              n = __copy_user_intel(to, from, n);
++              n = __generic_copy_to_user_intel(to, from, n);
+       clac();
+       return n;
+ }
+@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+ {
+       stac();
+       if (movsl_is_ok(to, from, n))
+-              __copy_user(to, from, n);
++              __copy_user(to, from, n, __copyuser_seg, "", "");
+       else
+-              n = __copy_user_intel((void __user *)to,
+-                                    (const void *)from, n);
++              n = __generic_copy_from_user_intel(to, from, n);
+       clac();
+       return n;
+ }
+@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+       if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
+               n = __copy_user_intel_nocache(to, from, n);
+       else
+-              __copy_user(to, from, n);
++              __copy_user(to, from, n, __copyuser_seg, "", "");
+ #else
+-      __copy_user(to, from, n);
++      __copy_user(to, from, n, __copyuser_seg, "", "");
+ #endif
+       clac();
+       return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to:   Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n:    Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep if pagefaults are
+- *          enabled.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x)
+ {
+-      if (access_ok(VERIFY_WRITE, to, n))
+-              n = __copy_to_user(to, from, n);
+-      return n;
++      switch (x.seg) {
++      case 0:
++              loadsegment(gs, 0);
++              break;
++      case TASK_SIZE_MAX:
++              loadsegment(gs, __USER_DS);
++              break;
++      case -1UL:
++              loadsegment(gs, __KERNEL_DS);
++              break;
++      default:
++              BUG();
++      }
+ }
+-EXPORT_SYMBOL(_copy_to_user);
++EXPORT_SYMBOL(__set_fs);
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to:   Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n:    Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep if pagefaults are
+- *          enabled.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
++void set_fs(mm_segment_t x)
+ {
+-      if (access_ok(VERIFY_READ, from, n))
+-              n = __copy_from_user(to, from, n);
+-      else
+-              memset(to, 0, n);
+-      return n;
++      current_thread_info()->addr_limit = x;
++      __set_fs(x);
+ }
+-EXPORT_SYMBOL(_copy_from_user);
++EXPORT_SYMBOL(set_fs);
++#endif
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 6987358..adaea41 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -18,7 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
+       might_fault();
+       /* no memory constraint because it doesn't change any memory gcc knows
+          about */
+-      stac();
++      user_access_begin();
+       asm volatile(
+               "       testq  %[size8],%[size8]\n"
+               "       jz     4f\n"
+@@ -39,9 +39,9 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
+               _ASM_EXTABLE(0b,3b)
+               _ASM_EXTABLE(1b,2b)
+               : [size8] "=&c"(size), [dst] "=&D" (__d0)
+-              : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
++              : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
+                 [zero] "r" (0UL), [eight] "r" (8UL));
+-      clac();
++      user_access_end();
+       return size;
+ }
+ EXPORT_SYMBOL(__clear_user);
+@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
+ }
+ EXPORT_SYMBOL(clear_user);
+-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
+ {
+-      if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
+-              return copy_user_generic((__force void *)to, (__force void *)from, len);
+-      } 
+-      return len;             
++      if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
++              return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
++      return len;
+ }
+ EXPORT_SYMBOL(copy_in_user);
+@@ -69,8 +68,9 @@ EXPORT_SYMBOL(copy_in_user);
+  * it is not necessary to optimize tail handling.
+  */
+ __visible unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len)
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
+ {
++      user_access_end();
+       for (; len; --len, to++) {
+               char c;
+@@ -79,10 +79,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
+               if (__put_user_nocheck(c, to, sizeof(char)))
+                       break;
+       }
+-      clac();
+       /* If the destination is a kernel buffer, we always clear the end */
+-      if (!__addr_ok(to))
+-              memset(to, 0, len);
++      if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
++              memset((void __force_kernel *)to, 0, len);
+       return len;
+ }
+diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
+index 024f6e9..308f1b0 100644
+--- a/arch/x86/math-emu/fpu_aux.c
++++ b/arch/x86/math-emu/fpu_aux.c
+@@ -52,7 +52,7 @@ void fpstate_init_soft(struct swregs_state *soft)
+ void finit(void)
+ {
+-      fpstate_init_soft(&current->thread.fpu.state.soft);
++      fpstate_init_soft(&current->thread.fpu.state->soft);
+ }
+ /*
+diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
+index e945fed..bffe686 100644
+--- a/arch/x86/math-emu/fpu_entry.c
++++ b/arch/x86/math-emu/fpu_entry.c
+@@ -643,7 +643,7 @@ int fpregs_soft_set(struct task_struct *target,
+                   unsigned int pos, unsigned int count,
+                   const void *kbuf, const void __user *ubuf)
+ {
+-      struct swregs_state *s387 = &target->thread.fpu.state.soft;
++      struct swregs_state *s387 = &target->thread.fpu.state->soft;
+       void *space = s387->st_space;
+       int ret;
+       int offset, other, i, tags, regnr, tag, newtop;
+@@ -695,7 +695,7 @@ int fpregs_soft_get(struct task_struct *target,
+                   unsigned int pos, unsigned int count,
+                   void *kbuf, void __user *ubuf)
+ {
+-      struct swregs_state *s387 = &target->thread.fpu.state.soft;
++      struct swregs_state *s387 = &target->thread.fpu.state->soft;
+       const void *space = s387->st_space;
+       int ret;
+       int offset = (S387->ftop & 7) * 10, other = 80 - offset;
+diff --git a/arch/x86/math-emu/fpu_etc.c b/arch/x86/math-emu/fpu_etc.c
+index 233e5af5..dd82ff0 100644
+--- a/arch/x86/math-emu/fpu_etc.c
++++ b/arch/x86/math-emu/fpu_etc.c
+@@ -119,9 +119,14 @@ static void fxam(FPU_REG *st0_ptr, u_char st0tag)
+       setcc(c);
+ }
++static void FPU_ST0_illegal(FPU_REG *st0_ptr, u_char st0_tag)
++{
++      FPU_illegal();
++}
++
+ static FUNC_ST0 const fp_etc_table[] = {
+-      fchs, fabs, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal,
+-      ftst_, fxam, (FUNC_ST0) FPU_illegal, (FUNC_ST0) FPU_illegal
++      fchs, fabs, FPU_ST0_illegal, FPU_ST0_illegal,
++      ftst_, fxam, FPU_ST0_illegal, FPU_ST0_illegal
+ };
+ void FPU_etc(void)
+diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
+index 5e044d5..d342fce 100644
+--- a/arch/x86/math-emu/fpu_system.h
++++ b/arch/x86/math-emu/fpu_system.h
+@@ -46,7 +46,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+ #define SEG_EXPAND_DOWN(s)    (((s).b & ((1 << 11) | (1 << 10))) \
+                                == (1 << 10))
+-#define I387                  (&current->thread.fpu.state)
++#define I387                  (current->thread.fpu.state)
+ #define FPU_info              (I387->soft.info)
+ #define FPU_CS                        (*(unsigned short *) &(FPU_info->regs->cs))
+diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
+index ecd0668..4b4c664 100644
+--- a/arch/x86/math-emu/fpu_trig.c
++++ b/arch/x86/math-emu/fpu_trig.c
+@@ -432,13 +432,13 @@ static void fxtract(FPU_REG *st0_ptr, u_char st0_tag)
+ #endif /* PARANOID */
+ }
+-static void fdecstp(void)
++static void fdecstp(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+       clear_C1();
+       top--;
+ }
+-static void fincstp(void)
++static void fincstp(FPU_REG *st0_ptr, u_char st0_tag)
+ {
+       clear_C1();
+       top++;
+@@ -607,6 +607,11 @@ static int fsin(FPU_REG *st0_ptr, u_char tag)
+       }
+ }
++static void _fsin(FPU_REG *st0_ptr, u_char tag)
++{
++      fsin(st0_ptr, tag);
++}
++
+ static int f_cos(FPU_REG *st0_ptr, u_char tag)
+ {
+       u_char st0_sign;
+@@ -1625,7 +1630,7 @@ static void fscale(FPU_REG *st0_ptr, u_char st0_tag)
+ static FUNC_ST0 const trig_table_a[] = {
+       f2xm1, fyl2x, fptan, fpatan,
+-      fxtract, fprem1, (FUNC_ST0) fdecstp, (FUNC_ST0) fincstp
++      fxtract, fprem1, fdecstp, fincstp
+ };
+ void FPU_triga(void)
+@@ -1634,7 +1639,7 @@ void FPU_triga(void)
+ }
+ static FUNC_ST0 const trig_table_b[] = {
+-      fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos
++      fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, _fsin, fcos
+ };
+ void FPU_trigb(void)
+diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
+index 0054835..a3bd671 100644
+--- a/arch/x86/math-emu/reg_constant.c
++++ b/arch/x86/math-emu/reg_constant.c
+@@ -107,8 +107,13 @@ static void fldz(int rc)
+ typedef void (*FUNC_RC) (int);
++static  void FPU_RC_illegal(int rc)
++{
++      FPU_illegal();
++}
++
+ static FUNC_RC constants_table[] = {
+-      fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, (FUNC_RC) FPU_illegal
++      fld1, fldl2t, fldl2e, fldpi, fldlg2, fldln2, fldz, FPU_RC_illegal
+ };
+ void fconst(void)
+diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
+index 96d2b84..b3db380 100644
+--- a/arch/x86/mm/Makefile
++++ b/arch/x86/mm/Makefile
+@@ -39,3 +39,7 @@ obj-$(CONFIG_X86_INTEL_MPX)  += mpx.o
+ obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
+ obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
++quote:="
++obj-$(CONFIG_X86_64)          += uderef_64.o
++CFLAGS_uderef_64.o            := -fcall-saved-rax -fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11
++
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index ea9c49a..7ab033a 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -27,6 +27,7 @@
+ struct pg_state {
+       int level;
+       pgprot_t current_prot;
++      pgprot_t current_prots[5];
+       unsigned long start_address;
+       unsigned long current_address;
+       const struct addr_marker *marker;
+@@ -184,6 +185,23 @@ static unsigned long normalize_addr(unsigned long u)
+ #endif
+ }
++static pgprot_t merge_prot(pgprot_t old_prot, pgprot_t new_prot)
++{
++      if (!(pgprot_val(new_prot) & _PAGE_PRESENT))
++              return new_prot;
++
++      if (!(pgprot_val(old_prot) & _PAGE_PRESENT))
++              return new_prot;
++
++      if (pgprot_val(old_prot) & _PAGE_NX)
++              pgprot_val(new_prot) |= _PAGE_NX;
++
++      if (!(pgprot_val(old_prot) & _PAGE_RW))
++              pgprot_val(new_prot) &= ~_PAGE_RW;
++
++      return new_prot;
++}
++
+ /*
+  * This function gets called on a break in a continuous series
+  * of PTE entries; the next one is different so we need to
+@@ -200,11 +218,13 @@ static void note_page(struct seq_file *m, struct pg_state *st,
+        * we have now. "break" is either changing perms, levels or
+        * address space marker.
+        */
++      new_prot = merge_prot(st->current_prots[level - 1], new_prot);
+       prot = pgprot_val(new_prot);
+       cur = pgprot_val(st->current_prot);
+       if (!st->level) {
+               /* First entry */
++              st->current_prots[0] = __pgprot(_PAGE_RW);
+               st->current_prot = new_prot;
+               st->level = level;
+               st->marker = address_markers;
+@@ -216,9 +236,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
+               const char *unit = units;
+               unsigned long delta;
+               int width = sizeof(unsigned long) * 2;
+-              pgprotval_t pr = pgprot_val(st->current_prot);
+-              if (st->check_wx && (pr & _PAGE_RW) && !(pr & _PAGE_NX)) {
++              if (st->check_wx && (cur & _PAGE_RW) && !(cur & _PAGE_NX)) {
+                       WARN_ONCE(1,
+                                 "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
+                                 (void *)st->start_address,
+@@ -304,9 +323,10 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
+       start = (pmd_t *) pud_page_vaddr(addr);
+       for (i = 0; i < PTRS_PER_PMD; i++) {
+               st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
++              prot = pmd_flags(*start);
++              st->current_prots[3] = merge_prot(st->current_prots[2], __pgprot(prot));
+               if (!pmd_none(*start)) {
+                       if (pmd_large(*start) || !pmd_present(*start)) {
+-                              prot = pmd_flags(*start);
+                               note_page(m, st, __pgprot(prot), 3);
+                       } else {
+                               walk_pte_level(m, st, *start,
+@@ -337,9 +357,10 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
+       for (i = 0; i < PTRS_PER_PUD; i++) {
+               st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
++              prot = pud_flags(*start);
++              st->current_prots[2] = merge_prot(st->current_prots[1], __pgprot(start->pud));
+               if (!pud_none(*start)) {
+                       if (pud_large(*start) || !pud_present(*start)) {
+-                              prot = pud_flags(*start);
+                               note_page(m, st, __pgprot(prot), 2);
+                       } else {
+                               walk_pmd_level(m, st, *start,
+@@ -395,9 +416,10 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
+       for (i = 0; i < PTRS_PER_PGD; i++) {
+               st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
++              prot = pgd_flags(*start);
++              st.current_prots[1] = __pgprot(prot);
+               if (!pgd_none(*start) && !is_hypervisor_range(i)) {
+                       if (pgd_large(*start) || !pgd_present(*start)) {
+-                              prot = pgd_flags(*start);
+                               note_page(m, &st, __pgprot(prot), 1);
+                       } else {
+                               walk_pud_level(m, &st, *start,
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index 832b98f..f107868 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -102,7 +102,7 @@ int fixup_exception(struct pt_regs *regs, int trapnr)
+       ex_handler_t handler;
+ #ifdef CONFIG_PNPBIOS
+-      if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++      if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+               extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+               extern u32 pnp_bios_is_utter_crap;
+               pnp_bios_is_utter_crap = 1;
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index dc80230..d0ef276 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -14,6 +14,8 @@
+ #include <linux/prefetch.h>           /* prefetchw                    */
+ #include <linux/context_tracking.h>   /* exception_enter(), ...       */
+ #include <linux/uaccess.h>            /* faulthandler_disabled()      */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+ #include <asm/cpufeature.h>           /* boot_cpu_has, ...            */
+ #include <asm/traps.h>                        /* dotraplinkage, ...           */
+@@ -23,6 +25,11 @@
+ #include <asm/vsyscall.h>             /* emulate_vsyscall             */
+ #include <asm/vm86.h>                 /* struct vm86                  */
+ #include <asm/mmu_context.h>          /* vma_pkey()                   */
++#include <asm/tlbflush.h>
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/stacktrace.h>
++#endif
+ #define CREATE_TRACE_POINTS
+ #include <asm/trace/exceptions.h>
+@@ -126,7 +133,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+               return !instr_lo || (instr_lo>>1) == 1;
+       case 0x00:
+               /* Prefetch instruction is 0x0F0D or 0x0F18 */
+-              if (probe_kernel_address(instr, opcode))
++              if (user_mode(regs)) {
++                      if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++                              return 0;
++              } else if (probe_kernel_address(instr, opcode))
+                       return 0;
+               *prefetch = (instr_lo == 0xF) &&
+@@ -160,7 +170,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+       while (instr < max_instr) {
+               unsigned char opcode;
+-              if (probe_kernel_address(instr, opcode))
++              if (user_mode(regs)) {
++                      if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++                              break;
++              } else if (probe_kernel_address(instr, opcode))
+                       break;
+               instr++;
+@@ -244,6 +257,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+       force_sig_info(si_signo, &info, tsk);
+ }
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++
++      pgd = pgd_offset(mm, address);
++      if (!pgd_present(*pgd))
++              return NULL;
++      pud = pud_offset(pgd, address);
++      if (!pud_present(*pud))
++              return NULL;
++      pmd = pmd_offset(pud, address);
++      if (!pmd_present(*pmd))
++              return NULL;
++      return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+@@ -294,10 +335,27 @@ void vmalloc_sync_all(void)
+       for (address = VMALLOC_START & PMD_MASK;
+            address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
+            address += PMD_SIZE) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned long cpu;
++#else
+               struct page *page;
++#endif
+               spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++                      pgd_t *pgd = get_cpu_pgd(cpu, user);
++                      pmd_t *ret;
++
++                      ret = vmalloc_sync_one(pgd, address);
++                      if (!ret)
++                              break;
++                      pgd = get_cpu_pgd(cpu, kernel);
++#else
+               list_for_each_entry(page, &pgd_list, lru) {
++                      pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+                       pmd_t *ret;
+@@ -305,8 +363,14 @@ void vmalloc_sync_all(void)
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
+-                      ret = vmalloc_sync_one(page_address(page), address);
++                      pgd = page_address(page);
++#endif
++
++                      ret = vmalloc_sync_one(pgd, address);
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+                       spin_unlock(pgt_lock);
++#endif
+                       if (!ret)
+                               break;
+@@ -340,6 +404,12 @@ static noinline int vmalloc_fault(unsigned long address)
+        * an interrupt in the middle of a task switch..
+        */
+       pgd_paddr = read_cr3();
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
++      vmalloc_sync_one(__va(pgd_paddr + PTRS_PER_PGD * sizeof(pgd_t)), address);
++#endif
++
+       pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+       if (!pmd_k)
+               return -1;
+@@ -439,11 +509,24 @@ static noinline int vmalloc_fault(unsigned long address)
+        * happen within a race in page table update. In the later
+        * case just flush:
+        */
+-      pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
+       pgd_ref = pgd_offset_k(address);
+       if (pgd_none(*pgd_ref))
+               return -1;
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
++      pgd = pgd_offset_cpu(smp_processor_id(), user, address);
++      if (pgd_none(*pgd)) {
++              set_pgd(pgd, *pgd_ref);
++              arch_flush_lazy_mmu_mode();
++      } else {
++              BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++      }
++      pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
++#else
++      pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
++#endif
++
+       if (pgd_none(*pgd)) {
+               set_pgd(pgd, *pgd_ref);
+               arch_flush_lazy_mmu_mode();
+@@ -616,7 +699,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+-      if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++      if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+               return 1;
+ #endif
+       return 0;
+@@ -643,9 +726,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+ }
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+ static const char smep_warning[] = KERN_CRIT
+-"unable to execute userspace code (SMEP?) (uid: %d)\n";
++"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -654,7 +737,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+       if (!oops_may_print())
+               return;
+-      if (error_code & PF_INSTR) {
++      if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
+               unsigned int level;
+               pgd_t *pgd;
+               pte_t *pte;
+@@ -665,13 +748,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+               pte = lookup_address_in_pgd(pgd, address, &level);
+               if (pte && pte_present(*pte) && !pte_exec(*pte))
+-                      printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
++                      printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
+               if (pte && pte_present(*pte) && pte_exec(*pte) &&
+                               (pgd_flags(*pgd) & _PAGE_USER) &&
+                               (__read_cr4() & X86_CR4_SMEP))
+-                      printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
++                      printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
+       }
++#ifdef CONFIG_PAX_KERNEXEC
++      if (init_mm.start_code <= address && address < init_mm.end_code) {
++              if (current->signal->curr_ip)
++                      printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++                                      &current->signal->curr_ip, current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++              else
++                      printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++                                      from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++      }
++#endif
++
+       printk(KERN_ALERT "BUG: unable to handle kernel ");
+       if (address < PAGE_SIZE)
+               printk(KERN_CONT "NULL pointer dereference");
+@@ -855,6 +950,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+               }
+ #endif
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              if (pax_is_fetch_fault(regs, error_code, address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++                      switch (pax_handle_fetch_fault(regs)) {
++                      case 2:
++                              return;
++                      }
++#endif
++
++                      pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++                      do_group_exit(SIGKILL);
++              }
++#endif
++
+               /*
+                * To avoid leaking information about the kernel page table
+                * layout, pretend that user-mode accesses to kernel addresses
+@@ -966,7 +1076,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+       if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+               printk(KERN_ERR
+       "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+-                      tsk->comm, tsk->pid, address);
++                      tsk->comm, task_pid_nr(tsk), address);
+               code = BUS_MCEERR_AR;
+       }
+ #endif
+@@ -1025,6 +1135,109 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+       return 1;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static inline unsigned long get_limit(unsigned long segment)
++{
++      unsigned long __limit;
++
++      asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
++      return __limit + 1;
++}
++
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++      pte_t *pte;
++      pmd_t *pmd;
++      spinlock_t *ptl;
++      unsigned char pte_mask;
++
++      if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++          !(mm->pax_flags & MF_PAX_PAGEEXEC))
++              return 0;
++
++      /* PaX: it's our fault, let's handle it if we can */
++
++      /* PaX: take a look at read faults before acquiring any locks */
++      if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++              /* instruction fetch attempt from a protected page in user mode */
++              up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++              switch (pax_handle_fetch_fault(regs)) {
++              case 2:
++                      return 1;
++              }
++#endif
++
++              pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++              do_group_exit(SIGKILL);
++      }
++
++      pmd = pax_get_pmd(mm, address);
++      if (unlikely(!pmd))
++              return 0;
++
++      pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++      if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++              pte_unmap_unlock(pte, ptl);
++              return 0;
++      }
++
++      if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++              /* write attempt to a protected page in user mode */
++              pte_unmap_unlock(pte, ptl);
++              return 0;
++      }
++
++#ifdef CONFIG_SMP
++      if (likely(address > get_limit(regs->cs) && cpumask_test_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask)))
++#else
++      if (likely(address > get_limit(regs->cs)))
++#endif
++      {
++              set_pte(pte, pte_mkread(*pte));
++              __flush_tlb_one(address);
++              pte_unmap_unlock(pte, ptl);
++              up_read(&mm->mmap_sem);
++              return 1;
++      }
++
++      pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++      /*
++       * PaX: fill DTLB with user rights and retry
++       */
++      __asm__ __volatile__ (
++              "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++              "invlpg (%0)\n"
++#endif
++              ASM_STAC "\n"
++              __copyuser_seg"testb $0,(%0)\n"
++              ASM_CLAC "\n"
++              "xorb %3,(%1)\n"
++              :
++              : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
++              : "memory", "cc");
++      pte_unmap_unlock(pte, ptl);
++      up_read(&mm->mmap_sem);
++      return 1;
++}
++#endif
++
+ /*
+  * Handle a spurious fault caused by a stale TLB entry.
+  *
+@@ -1112,6 +1325,10 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
+ {
+       /* This is only called for the current mm, so: */
+       bool foreign = false;
++
++      if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++              return 1;
++
+       /*
+        * Make sure to check the VMA so that we do not perform
+        * faults just to hit a PF_PK as soon as we fill in a
+@@ -1183,6 +1400,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+       tsk = current;
+       mm = tsk->mm;
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
++              if (!search_exception_tables(regs->ip)) {
++                      printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
++                      bad_area_nosemaphore(regs, error_code, address, NULL);
++                      return;
++              }
++              if (address < pax_user_shadow_base) {
++                      printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
++                      printk(KERN_EMERG "PAX: faulting IP: %pS\n", (void *)regs->ip);
++                      show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_EMERG);
++              } else
++                      address -= pax_user_shadow_base;
++      }
++#endif
++
+       /*
+        * Detect and handle instructions that would cause a page fault for
+        * both a tracked kernel page and a userspace page.
+@@ -1309,6 +1542,11 @@ retry:
+               might_sleep();
+       }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++      if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++              return;
++#endif
++
+       vma = find_vma(mm, address);
+       if (unlikely(!vma)) {
+               bad_area(regs, error_code, address);
+@@ -1320,18 +1558,24 @@ retry:
+               bad_area(regs, error_code, address);
+               return;
+       }
+-      if (error_code & PF_USER) {
+-              /*
+-               * Accessing the stack below %sp is always a bug.
+-               * The large cushion allows instructions like enter
+-               * and pusha to work. ("enter $65535, $31" pushes
+-               * 32 pointers and then decrements %sp by 65535.)
+-               */
+-              if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+-                      bad_area(regs, error_code, address);
+-                      return;
+-              }
++      /*
++       * Accessing the stack below %sp is always a bug.
++       * The large cushion allows instructions like enter
++       * and pusha to work. ("enter $65535, $31" pushes
++       * 32 pointers and then decrements %sp by 65535.)
++       */
++      if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++              bad_area(regs, error_code, address);
++              return;
+       }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++              bad_area(regs, error_code, address);
++              return;
++      }
++#endif
++
+       if (unlikely(expand_stack(vma, address))) {
+               bad_area(regs, error_code, address);
+               return;
+@@ -1451,3 +1695,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ }
+ NOKPROBE_SYMBOL(trace_do_page_fault);
+ #endif /* CONFIG_TRACING */
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
++{
++      struct mm_struct *mm = current->mm;
++      unsigned long ip = regs->ip;
++
++      if (v8086_mode(regs))
++              ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++              if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
++                      return true;
++              if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
++                      return true;
++              return false;
++      }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
++                      return true;
++              return false;
++      }
++#endif
++
++      return false;
++}
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++      int err;
++
++      do { /* PaX: libffi trampoline emulation */
++              unsigned char mov, jmp;
++              unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++              if ((regs->ip + 9) >> 32)
++                      break;
++#endif
++
++              err = get_user(mov, (unsigned char __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++              err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++              err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++              if (err)
++                      break;
++
++              if (mov == 0xB8 && jmp == 0xE9) {
++                      regs->ax = addr1;
++                      regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #1 */
++              unsigned char mov1, mov2;
++              unsigned short jmp;
++              unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++              if ((regs->ip + 11) >> 32)
++                      break;
++#endif
++
++              err = get_user(mov1, (unsigned char __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++              err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++              err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++              err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++                      regs->cx = addr1;
++                      regs->ax = addr2;
++                      regs->ip = addr2;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #2 */
++              unsigned char mov, jmp;
++              unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++              if ((regs->ip + 9) >> 32)
++                      break;
++#endif
++
++              err = get_user(mov, (unsigned char __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++              err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++              err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++              if (err)
++                      break;
++
++              if (mov == 0xB9 && jmp == 0xE9) {
++                      regs->cx = addr1;
++                      regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++                      return 2;
++              }
++      } while (0);
++
++      return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++      int err;
++
++      do { /* PaX: libffi trampoline emulation */
++              unsigned short mov1, mov2, jmp1;
++              unsigned char stcclc, jmp2;
++              unsigned long addr1, addr2;
++
++              err = get_user(mov1, (unsigned short __user *)regs->ip);
++              err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++              err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++              err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++              err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
++              err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
++              err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++                      regs->r11 = addr1;
++                      regs->r10 = addr2;
++                      if (stcclc == 0xF8)
++                              regs->flags &= ~X86_EFLAGS_CF;
++                      else
++                              regs->flags |= X86_EFLAGS_CF;
++                      regs->ip = addr1;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #1 */
++              unsigned short mov1, mov2, jmp1;
++              unsigned char jmp2;
++              unsigned int addr1;
++              unsigned long addr2;
++
++              err = get_user(mov1, (unsigned short __user *)regs->ip);
++              err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++              err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++              err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++              err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++              err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++                      regs->r11 = addr1;
++                      regs->r10 = addr2;
++                      regs->ip = addr1;
++                      return 2;
++              }
++      } while (0);
++
++      do { /* PaX: gcc trampoline emulation #2 */
++              unsigned short mov1, mov2, jmp1;
++              unsigned char jmp2;
++              unsigned long addr1, addr2;
++
++              err = get_user(mov1, (unsigned short __user *)regs->ip);
++              err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++              err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++              err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++              err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++              err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++              if (err)
++                      break;
++
++              if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++                      regs->r11 = addr1;
++                      regs->r10 = addr2;
++                      regs->ip = addr1;
++                      return 2;
++              }
++      } while (0);
++
++      return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++      if (v8086_mode(regs))
++              return 1;
++
++      if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++              return 1;
++
++#ifdef CONFIG_X86_32
++      return pax_handle_fetch_fault_32(regs);
++#else
++      if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++              return pax_handle_fetch_fault_32(regs);
++      else
++              return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++      long i;
++
++      printk(KERN_ERR "PAX: bytes at PC: ");
++      for (i = 0; i < 20; i++) {
++              unsigned char c;
++              if (get_user(c, (unsigned char __force_user *)pc+i))
++                      printk(KERN_CONT "?? ");
++              else
++                      printk(KERN_CONT "%02x ", c);
++      }
++      printk("\n");
++
++      printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++      for (i = -1; i < 80 / (long)sizeof(long); i++) {
++              unsigned long c;
++              if (get_user(c, (unsigned long __force_user *)sp+i)) {
++#ifdef CONFIG_X86_32
++                      printk(KERN_CONT "???????? ");
++#else
++                      if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
++                              printk(KERN_CONT "???????? ???????? ");
++                      else
++                              printk(KERN_CONT "???????????????? ");
++#endif
++              } else {
++#ifdef CONFIG_X86_64
++                      if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
++                              printk(KERN_CONT "%08x ", (unsigned int)c);
++                              printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
++                      } else
++#endif
++                              printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++              }
++      }
++      printk("\n");
++}
++#endif
++
++/**
++ * probe_kernel_write(): safely attempt to write to a location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src.  If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++long notrace probe_kernel_write(void *dst, const void *src, size_t size)
++{
++      long ret;
++      mm_segment_t old_fs = get_fs();
++
++      set_fs(KERNEL_DS);
++      pagefault_disable();
++      pax_open_kernel();
++      ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
++      pax_close_kernel();
++      pagefault_enable();
++      set_fs(old_fs);
++
++      return ret ? -EFAULT : 0;
++}
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index b8b6a60..9193b78 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -313,7 +313,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+       addr = start;
+       len = (unsigned long) nr_pages << PAGE_SHIFT;
+       end = start + len;
+-      if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++      if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
+                                       (void __user *)start, len)))
+               return 0;
+@@ -389,6 +389,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+               goto slow_irqon;
+ #endif
++      if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
++                                      (void __user *)start, len)))
++              return 0;
++
+       /*
+        * XXX: batch / limit 'nr', to avoid large irq off latency
+        * needs some instrumenting to determine the common sizes used by
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 6d18b70..9dc249e 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -35,6 +35,8 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+       unsigned long vaddr;
+       int idx, type;
++      BUG_ON(pgprot_val(prot) & _PAGE_USER);
++
+       preempt_disable();
+       pagefault_disable();
+@@ -45,7 +47,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++      pax_open_kernel();
+       set_pte(kmap_pte-idx, mk_pte(page, prot));
++      pax_close_kernel();
++
+       arch_flush_lazy_mmu_mode();
+       return (void *)vaddr;
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 2ae8584..e8f8f29 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
+ #ifdef CONFIG_HUGETLB_PAGE
+ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+               unsigned long addr, unsigned long len,
+-              unsigned long pgoff, unsigned long flags)
++              unsigned long pgoff, unsigned long flags, unsigned long offset)
+ {
+       struct hstate *h = hstate_file(file);
+       struct vm_unmapped_area_info info;
+-
++      
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = current->mm->mmap_legacy_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+               unsigned long addr0, unsigned long len,
+-              unsigned long pgoff, unsigned long flags)
++              unsigned long pgoff, unsigned long flags, unsigned long offset)
+ {
+       struct hstate *h = hstate_file(file);
+       struct vm_unmapped_area_info info;
+@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+       info.high_limit = current->mm->mmap_base;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += current->mm->delta_mmap;
++#endif
++
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+       struct hstate *h = hstate_file(file);
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
++      unsigned long pax_task_size = TASK_SIZE;
++      unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+-      if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (len > pax_task_size)
+               return -ENOMEM;
+       if (flags & MAP_FIXED) {
+@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = ALIGN(addr, huge_page_size(h));
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       if (mm->get_unmapped_area == arch_get_unmapped_area)
+               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+       else
+               return hugetlb_get_unmapped_area_topdown(file, addr, len,
+-                              pgoff, flags);
++                              pgoff, flags, offset);
+ }
+ #endif /* CONFIG_HUGETLB_PAGE */
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index d28a2d7..3e6afa44 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -4,6 +4,7 @@
+ #include <linux/swap.h>
+ #include <linux/memblock.h>
+ #include <linux/bootmem.h>    /* for max_low_pfn */
++#include <linux/tboot.h>
+ #include <asm/cacheflush.h>
+ #include <asm/e820.h>
+@@ -18,6 +19,7 @@
+ #include <asm/dma.h>          /* for MAX_DMA_PFN */
+ #include <asm/microcode.h>
+ #include <asm/kaslr.h>
++#include <asm/bios_ebda.h>
+ /*
+  * We need to define the tracepoints somewhere, and tlb.c
+@@ -633,7 +635,18 @@ void __init init_mem_mapping(void)
+       early_ioremap_page_table_range_init();
+ #endif
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++      clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
++                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++                      KERNEL_PGD_PTRS);
++      load_cr3(get_cpu_pgd(0, kernel));
++#else
+       load_cr3(swapper_pg_dir);
++#endif
++
+       __flush_tlb_all();
+       early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
+@@ -649,10 +662,34 @@ void __init init_mem_mapping(void)
+  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+  * mmio resources as well as potential bios/acpi data regions.
+  */
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++static unsigned int ebda_start __read_only;
++static unsigned int ebda_end __read_only;
++#endif
++
+ int devmem_is_allowed(unsigned long pagenr)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++      /* allow BDA */
++      if (!pagenr)
++              return 1;
++      /* allow EBDA */
++      if (pagenr >= ebda_start && pagenr < ebda_end)
++              return 1;
++      /* if tboot is in use, allow access to its hardcoded serial log range */
++      if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
++              return 1;
++      if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
++              return 1;
++      /* throw out everything else below 1MB */
++      if (pagenr <= 256)
++              return 0;
++#else
+       if (pagenr < 256)
+               return 1;
++#endif
++
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+               return 0;
+       if (!page_is_ram(pagenr))
+@@ -699,8 +736,33 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+       }
+ }
++#ifdef CONFIG_GRKERNSEC_KMEM
++static inline void gr_init_ebda(void)
++{
++      unsigned int ebda_addr;
++      unsigned int ebda_size = 0;
++
++      ebda_addr = get_bios_ebda();
++      if (ebda_addr) {
++              ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
++              ebda_size <<= 10;
++      }
++      if (ebda_addr && ebda_size) {
++              ebda_start = ebda_addr >> PAGE_SHIFT;
++              ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
++      } else {
++              ebda_start = 0x9f000 >> PAGE_SHIFT;
++              ebda_end = 0xa0000 >> PAGE_SHIFT;
++      }
++}
++#else
++static inline void gr_init_ebda(void) { }
++#endif
++
+ void free_initmem(void)
+ {
++      gr_init_ebda();
++
+       free_init_pages("unused kernel",
+                       (unsigned long)(&__init_begin),
+                       (unsigned long)(&__init_end));
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index cf80590..90a1a8f 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -51,6 +51,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/page_types.h>
+ #include <asm/init.h>
++#include <asm/desc.h>
+ #include "mm_internal.h"
+@@ -61,33 +62,6 @@ static noinline int do_test_wp_bit(void);
+ bool __read_mostly __vmalloc_start_set = false;
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+-      pud_t *pud;
+-      pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+-      if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+-              pmd_table = (pmd_t *)alloc_low_page();
+-              paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+-              set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+-              pud = pud_offset(pgd, 0);
+-              BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+-              return pmd_table;
+-      }
+-#endif
+-      pud = pud_offset(pgd, 0);
+-      pmd_table = pmd_offset(pud, 0);
+-
+-      return pmd_table;
+-}
+-
+-/*
+  * Create a page table and place a pointer to it in a middle page
+  * directory entry:
+  */
+@@ -97,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
+               pte_t *page_table = (pte_t *)alloc_low_page();
+               paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++              set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+               set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+               BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+       }
+       return pte_offset_kernel(pmd, 0);
+ }
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++      pud_t *pud;
++      pmd_t *pmd_table;
++
++      pud = pud_offset(pgd, 0);
++      pmd_table = pmd_offset(pud, 0);
++
++      return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+       int pgd_idx = pgd_index(vaddr);
+@@ -208,6 +197,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+       int pgd_idx, pmd_idx;
+       unsigned long vaddr;
+       pgd_t *pgd;
++      pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte = NULL;
+       unsigned long count = page_table_range_init_count(start, end);
+@@ -222,8 +212,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+       pgd = pgd_base + pgd_idx;
+       for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+-              pmd = one_md_table_init(pgd);
+-              pmd = pmd + pmd_index(vaddr);
++              pud = pud_offset(pgd, vaddr);
++              pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++              paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+               for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+                                                       pmd++, pmd_idx++) {
+                       pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -235,11 +230,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+       }
+ }
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+-      if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
+-              return 1;
+-      return 0;
++      if ((start >= ktla_ktva((unsigned long)_etext) ||
++           end <= ktla_ktva((unsigned long)_stext)) &&
++          (start >= ktla_ktva((unsigned long)_einittext) ||
++           end <= ktla_ktva((unsigned long)_sinittext)) &&
++
++#ifdef CONFIG_ACPI_SLEEP
++          (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
++#endif
++
++          (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++              return 0;
++      return 1;
+ }
+ /*
+@@ -256,9 +260,10 @@ kernel_physical_mapping_init(unsigned long start,
+       unsigned long last_map_addr = end;
+       unsigned long start_pfn, end_pfn;
+       pgd_t *pgd_base = swapper_pg_dir;
+-      int pgd_idx, pmd_idx, pte_ofs;
++      unsigned int pgd_idx, pmd_idx, pte_ofs;
+       unsigned long pfn;
+       pgd_t *pgd;
++      pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned pages_2m, pages_4k;
+@@ -291,8 +296,13 @@ repeat:
+       pfn = start_pfn;
+       pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
+-      for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+-              pmd = one_md_table_init(pgd);
++      for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++              pud = pud_offset(pgd, 0);
++              pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++              paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+               if (pfn >= end_pfn)
+                       continue;
+@@ -304,14 +314,13 @@ repeat:
+ #endif
+               for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+                    pmd++, pmd_idx++) {
+-                      unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++                      unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+                       /*
+                        * Map with big pages if possible, otherwise
+                        * create normal page tables:
+                        */
+                       if (use_pse) {
+-                              unsigned int addr2;
+                               pgprot_t prot = PAGE_KERNEL_LARGE;
+                               /*
+                                * first pass will use the same initial
+@@ -322,11 +331,7 @@ repeat:
+                                                _PAGE_PSE);
+                               pfn &= PMD_MASK >> PAGE_SHIFT;
+-                              addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+-                                      PAGE_OFFSET + PAGE_SIZE-1;
+-
+-                              if (is_kernel_text(addr) ||
+-                                  is_kernel_text(addr2))
++                              if (is_kernel_text(address, address + PMD_SIZE))
+                                       prot = PAGE_KERNEL_LARGE_EXEC;
+                               pages_2m++;
+@@ -343,7 +348,7 @@ repeat:
+                       pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+                       pte += pte_ofs;
+                       for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+-                           pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++                           pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+                               pgprot_t prot = PAGE_KERNEL;
+                               /*
+                                * first pass will use the same initial
+@@ -351,7 +356,7 @@ repeat:
+                                */
+                               pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+-                              if (is_kernel_text(addr))
++                              if (is_kernel_text(address, address + PAGE_SIZE))
+                                       prot = PAGE_KERNEL_EXEC;
+                               pages_4k++;
+@@ -471,7 +476,7 @@ void __init native_pagetable_init(void)
+               pud = pud_offset(pgd, va);
+               pmd = pmd_offset(pud, va);
+-              if (!pmd_present(*pmd))
++              if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
+                       break;
+               /* should not be large page here */
+@@ -529,12 +534,10 @@ void __init early_ioremap_page_table_range_init(void)
+ static void __init pagetable_init(void)
+ {
+-      pgd_t *pgd_base = swapper_pg_dir;
+-
+-      permanent_kmaps_init(pgd_base);
++      permanent_kmaps_init(swapper_pg_dir);
+ }
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+ /* user-defined highmem size */
+@@ -784,10 +787,10 @@ void __init mem_init(void)
+               ((unsigned long)&__init_end -
+                (unsigned long)&__init_begin) >> 10,
+-              (unsigned long)&_etext, (unsigned long)&_edata,
+-              ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++              (unsigned long)&_sdata, (unsigned long)&_edata,
++              ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
+-              (unsigned long)&_text, (unsigned long)&_etext,
++              ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+       /*
+@@ -867,7 +870,7 @@ static noinline int do_test_wp_bit(void)
+ const int rodata_test_data = 0xC3;
+ EXPORT_SYMBOL_GPL(rodata_test_data);
+-int kernel_set_to_readonly __read_mostly;
++int kernel_set_to_readonly __read_only;
+ void set_kernel_text_rw(void)
+ {
+@@ -877,6 +880,7 @@ void set_kernel_text_rw(void)
+       if (!kernel_set_to_readonly)
+               return;
++      start = ktla_ktva(start);
+       pr_debug("Set kernel text: %lx - %lx for read write\n",
+                start, start+size);
+@@ -891,6 +895,7 @@ void set_kernel_text_ro(void)
+       if (!kernel_set_to_readonly)
+               return;
++      start = ktla_ktva(start);
+       pr_debug("Set kernel text: %lx - %lx for read only\n",
+                start, start+size);
+@@ -903,7 +908,7 @@ static void mark_nxdata_nx(void)
+        * When this called, init has already been executed and released,
+        * so everything past _etext should be NX.
+        */
+-      unsigned long start = PFN_ALIGN(_etext);
++      unsigned long start = ktla_ktva(PFN_ALIGN(_etext));
+       /*
+        * This comes from is_kernel_text upper limit. Also HPAGE where used:
+        */
+@@ -919,26 +924,52 @@ void mark_rodata_ro(void)
+       unsigned long start = PFN_ALIGN(_text);
+       unsigned long size = PFN_ALIGN(_etext) - start;
+-      set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+-      printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+-              size >> 10);
++#ifdef CONFIG_PAX_KERNEXEC
++      /* PaX: limit KERNEL_CS to actual size */
++      unsigned long limit;
++      struct desc_struct d;
++      int cpu;
++      limit = get_kernel_rpl() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++      limit = (limit - 1UL) >> PAGE_SHIFT;
++
++      memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++      for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++              pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++              write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++              write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
++      }
++
++#ifdef CONFIG_MODULES
++      set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++#endif
++
++      start = ktla_ktva(start);
++#ifdef CONFIG_PAX_KERNEXEC
++      /* PaX: make KERNEL_CS read-only */
++      if (!get_kernel_rpl()) {
++#endif
+       kernel_set_to_readonly = 1;
++      set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
++      printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10);
++
+ #ifdef CONFIG_CPA_DEBUG
+-      printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+-              start, start+size);
++      printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", start, start+size);
+       set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
+       printk(KERN_INFO "Testing CPA: write protecting again\n");
+       set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
+ #endif
++#ifdef CONFIG_PAX_KERNEXEC
++      }
++#endif
+       start += size;
+-      size = (unsigned long)__end_rodata - start;
++      size = PFN_ALIGN(_sdata) - start;
+       set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+-      printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+-              size >> 10);
++      printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10);
+       rodata_test();
+ #ifdef CONFIG_CPA_DEBUG
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 14b9dd7..774d517 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -65,7 +65,7 @@
+  * around without checking the pgd every time.
+  */
+-pteval_t __supported_pte_mask __read_mostly = ~0;
++pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+ int force_personality32;
+@@ -98,7 +98,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
+       for (address = start; address <= end; address += PGDIR_SIZE) {
+               const pgd_t *pgd_ref = pgd_offset_k(address);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned long cpu;
++#else
+               struct page *page;
++#endif
+               /*
+                * When it is called after memory hot remove, pgd_none()
+@@ -109,6 +114,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
+                       continue;
+               spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++                      pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
++
++                      if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
++                              BUG_ON(pgd_page_vaddr(*pgd)
++                                     != pgd_page_vaddr(*pgd_ref));
++
++                      if (removed) {
++                              if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
++                                      pgd_clear(pgd);
++                      } else {
++                              if (pgd_none(*pgd))
++                                      set_pgd(pgd, *pgd_ref);
++                      }
++
++                      pgd = pgd_offset_cpu(cpu, kernel, address);
++#else
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+@@ -117,6 +141,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
+                       /* the pgt_lock only for Xen */
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
++#endif
+                       if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+                               BUG_ON(pgd_page_vaddr(*pgd)
+@@ -130,7 +155,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
+                                       set_pgd(pgd, *pgd_ref);
+                       }
++#ifndef CONFIG_PAX_PER_CPU_PGD
+                       spin_unlock(pgt_lock);
++#endif
++
+               }
+               spin_unlock(&pgd_lock);
+       }
+@@ -163,7 +191,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
+ {
+       if (pgd_none(*pgd)) {
+               pud_t *pud = (pud_t *)spp_getpage();
+-              pgd_populate(&init_mm, pgd, pud);
++              pgd_populate_kernel(&init_mm, pgd, pud);
+               if (pud != pud_offset(pgd, 0))
+                       printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+                              pud, pud_offset(pgd, 0));
+@@ -175,7 +203,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+ {
+       if (pud_none(*pud)) {
+               pmd_t *pmd = (pmd_t *) spp_getpage();
+-              pud_populate(&init_mm, pud, pmd);
++              pud_populate_kernel(&init_mm, pud, pmd);
+               if (pmd != pmd_offset(pud, 0))
+                       printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+                              pmd, pmd_offset(pud, 0));
+@@ -204,7 +232,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+       pmd = fill_pmd(pud, vaddr);
+       pte = fill_pte(pmd, vaddr);
++      pax_open_kernel();
+       set_pte(pte, new_pte);
++      pax_close_kernel();
+       /*
+        * It's enough to flush this one mapping.
+@@ -266,14 +296,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+               pgd = pgd_offset_k((unsigned long)__va(phys));
+               if (pgd_none(*pgd)) {
+                       pud = (pud_t *) spp_getpage();
+-                      set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+-                                              _PAGE_USER));
++                      set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+               }
+               pud = pud_offset(pgd, (unsigned long)__va(phys));
+               if (pud_none(*pud)) {
+                       pmd = (pmd_t *) spp_getpage();
+-                      set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+-                                              _PAGE_USER));
++                      set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+               }
+               pmd = pmd_offset(pud, phys);
+               BUG_ON(!pmd_none(*pmd));
+@@ -543,7 +571,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+                                          page_size_mask, prot);
+               spin_lock(&init_mm.page_table_lock);
+-              pud_populate(&init_mm, pud, pmd);
++              pud_populate_kernel(&init_mm, pud, pmd);
+               spin_unlock(&init_mm.page_table_lock);
+       }
+       __flush_tlb_all();
+@@ -590,7 +618,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
+                                          page_size_mask);
+               spin_lock(&init_mm.page_table_lock);
+-              pgd_populate(&init_mm, pgd, pud);
++              pgd_populate_kernel(&init_mm, pgd, pud);
+               spin_unlock(&init_mm.page_table_lock);
+               pgd_changed = true;
+       }
+@@ -1013,7 +1041,7 @@ void __init mem_init(void)
+ const int rodata_test_data = 0xC3;
+ EXPORT_SYMBOL_GPL(rodata_test_data);
+-int kernel_set_to_readonly;
++int kernel_set_to_readonly __read_only;
+ void set_kernel_text_rw(void)
+ {
+@@ -1042,8 +1070,7 @@ void set_kernel_text_ro(void)
+       if (!kernel_set_to_readonly)
+               return;
+-      pr_debug("Set kernel text: %lx - %lx for read only\n",
+-               start, end);
++      pr_debug("Set kernel text: %lx - %lx for read only\n", start, end);
+       /*
+        * Set the kernel identity mapping for text RO.
+@@ -1054,18 +1081,23 @@ void set_kernel_text_ro(void)
+ void mark_rodata_ro(void)
+ {
+       unsigned long start = PFN_ALIGN(_text);
++#ifdef CONFIG_PAX_KERNEXEC
++      unsigned long addr;
++      unsigned long end = PFN_ALIGN(_sdata);
++      unsigned long text_end = end;
++#else
+       unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+       unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+       unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+       unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
++#endif
+       unsigned long all_end;
+-      printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+-             (end - start) >> 10);
+-      set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+-
+       kernel_set_to_readonly = 1;
++      printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10);
++      set_memory_ro(start, (end - start) >> PAGE_SHIFT);
++
+       /*
+        * The rodata/data/bss/brk section (but not the kernel text!)
+        * should also be not-executable.
+@@ -1091,12 +1123,54 @@ void mark_rodata_ro(void)
+       set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+ #endif
++#ifdef CONFIG_PAX_KERNEXEC
++      /* PaX: ensure that kernel code/rodata is read-only, the rest is non-executable */
++      for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++              pgd_t *pgd;
++              pud_t *pud;
++              pmd_t *pmd;
++
++              pgd = pgd_offset_k(addr);
++              pud = pud_offset(pgd, addr);
++              pmd = pmd_offset(pud, addr);
++              if (!pmd_present(*pmd))
++                      continue;
++              if (addr >= (unsigned long)_text)
++                      BUG_ON(!pmd_large(*pmd));
++              if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++                      BUG_ON(pmd_write(*pmd));
++//                    set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++              else
++                      BUG_ON(!(pmd_flags(*pmd) & _PAGE_NX));
++//                    set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++      }
++
++      addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++      end = addr + KERNEL_IMAGE_SIZE;
++      for (; addr < end; addr += PMD_SIZE) {
++              pgd_t *pgd;
++              pud_t *pud;
++              pmd_t *pmd;
++
++              pgd = pgd_offset_k(addr);
++              pud = pud_offset(pgd, addr);
++              pmd = pmd_offset(pud, addr);
++              if (!pmd_present(*pmd))
++                      continue;
++              if (addr >= (unsigned long)_text)
++                      BUG_ON(!pmd_large(*pmd));
++              if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++                      BUG_ON(pmd_write(*pmd));
++//                    set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++      }
++#else
+       free_init_pages("unused kernel",
+                       (unsigned long) __va(__pa_symbol(text_end)),
+                       (unsigned long) __va(__pa_symbol(rodata_start)));
+       free_init_pages("unused kernel",
+                       (unsigned long) __va(__pa_symbol(rodata_end)),
+                       (unsigned long) __va(__pa_symbol(_sdata)));
++#endif
+       debug_checkwx();
+ }
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index ada98b3..c812b62 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++      pax_open_kernel();
+       set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++      pax_close_kernel();
++
+       arch_flush_lazy_mmu_mode();
+       return (void *)vaddr;
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 7aaa263..e77438f 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -58,8 +58,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+       unsigned long i;
+       for (i = 0; i < nr_pages; ++i)
+-              if (pfn_valid(start_pfn + i) &&
+-                  !PageReserved(pfn_to_page(start_pfn + i)))
++              if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
++                  !PageReserved(pfn_to_page(start_pfn + i))))
+                       return 1;
+       return 0;
+@@ -80,7 +80,7 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+  * caller shouldn't need to know that small detail.
+  */
+ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+-              unsigned long size, enum page_cache_mode pcm, void *caller)
++              resource_size_t size, enum page_cache_mode pcm, void *caller)
+ {
+       unsigned long offset, vaddr;
+       resource_size_t pfn, last_pfn, last_addr;
+@@ -331,7 +331,7 @@ EXPORT_SYMBOL(ioremap_prot);
+  *
+  * Caller must ensure there is only one unmapping for the same pointer.
+  */
+-void iounmap(volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *addr)
+ {
+       struct vm_struct *p, *o;
+@@ -394,31 +394,37 @@ int __init arch_ioremap_pmd_supported(void)
+  */
+ void *xlate_dev_mem_ptr(phys_addr_t phys)
+ {
+-      unsigned long start  = phys &  PAGE_MASK;
+-      unsigned long offset = phys & ~PAGE_MASK;
+-      void *vaddr;
++      phys_addr_t pfn = phys >> PAGE_SHIFT;
+-      /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+-      if (page_is_ram(start >> PAGE_SHIFT))
+-              return __va(phys);
++      if (page_is_ram(pfn)) {
++#ifdef CONFIG_HIGHMEM
++              if (pfn >= max_low_pfn)
++                      return kmap_high(pfn_to_page(pfn));
++              else
++#endif
++                      return __va(phys);
++      }
+-      vaddr = ioremap_cache(start, PAGE_SIZE);
+-      /* Only add the offset on success and return NULL if the ioremap() failed: */
+-      if (vaddr)
+-              vaddr += offset;
+-
+-      return vaddr;
++      return (void __force *)ioremap_cache(phys, 1);
+ }
+ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+ {
+-      if (page_is_ram(phys >> PAGE_SHIFT))
++      phys_addr_t pfn = phys >> PAGE_SHIFT;
++
++      if (page_is_ram(pfn)) {
++#ifdef CONFIG_HIGHMEM
++              if (pfn >= max_low_pfn)
++                      kunmap_high(pfn_to_page(pfn));
++#endif
+               return;
++      }
+-      iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
++      iounmap((void __iomem __force *)addr);
+ }
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t __bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_rodata;
++static pte_t *bm_pte __read_only = __bm_pte;
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -454,8 +460,14 @@ void __init early_ioremap_init(void)
+       early_ioremap_setup();
+       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+-      memset(bm_pte, 0, sizeof(bm_pte));
+-      pmd_populate_kernel(&init_mm, pmd, bm_pte);
++      if (pmd_none(*pmd))
++#ifdef CONFIG_COMPAT_VDSO
++              pmd_populate_user(&init_mm, pmd, __bm_pte);
++#else
++              pmd_populate_kernel(&init_mm, pmd, __bm_pte);
++#endif
++      else
++              bm_pte = (pte_t *)pmd_page_vaddr(*pmd);
+       /*
+        * The boot-ioremap range spans multiple pmds, for which
+diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
+index 4515bae..e162764 100644
+--- a/arch/x86/mm/kmemcheck/kmemcheck.c
++++ b/arch/x86/mm/kmemcheck/kmemcheck.c
+@@ -627,9 +627,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+        * memory (e.g. tracked pages)? For now, we need this to avoid
+        * invoking kmemcheck for PnP BIOS calls.
+        */
+-      if (regs->flags & X86_VM_MASK)
++      if (v8086_mode(regs))
+               return false;
+-      if (regs->cs != __KERNEL_CS)
++      if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
+               return false;
+       pte = kmemcheck_pte_lookup(address);
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index d2dc043..41dfc2b 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
+  * Leave an at least ~128 MB hole with possible stack randomization.
+  */
+ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+ static int mmap_is_legacy(void)
+ {
+@@ -81,16 +81,31 @@ unsigned long arch_mmap_rnd(void)
+       return rnd << PAGE_SHIFT;
+ }
+-static unsigned long mmap_base(unsigned long rnd)
++static unsigned long mmap_base(struct mm_struct *mm, unsigned long rnd)
+ {
+       unsigned long gap = rlimit(RLIMIT_STACK);
++      unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+       if (gap < MIN_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
+-      return PAGE_ALIGN(TASK_SIZE - gap - rnd);
++      return PAGE_ALIGN(pax_task_size - gap - rnd);
++}
++
++static unsigned long mmap_legacy_base(struct mm_struct *mm, unsigned long rnd)
++{
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mmap_is_ia32() && (mm->pax_flags & MF_PAX_SEGMEXEC))
++              return SEGMEXEC_TASK_UNMAPPED_BASE + rnd;
++#endif
++      return TASK_UNMAPPED_BASE + rnd;
+ }
+ /*
+@@ -101,18 +116,29 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       unsigned long random_factor = 0UL;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+-      mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor;
++      mm->mmap_legacy_base = mmap_legacy_base(mm, random_factor);
+       if (mmap_is_legacy()) {
+               mm->mmap_base = mm->mmap_legacy_base;
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+-              mm->mmap_base = mmap_base(random_factor);
++              mm->mmap_base = mmap_base(mm, random_factor);
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP) {
++              mm->mmap_legacy_base += mm->delta_mmap;
++              mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++      }
++#endif
++
+ }
+ const char *arch_vma_name(struct vm_area_struct *vma)
+diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
+index bef3662..c5b2523 100644
+--- a/arch/x86/mm/mmio-mod.c
++++ b/arch/x86/mm/mmio-mod.c
+@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
+               break;
+       default:
+               {
+-                      unsigned char *ip = (unsigned char *)instptr;
++                      unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
+                       my_trace->opcode = MMIO_UNKNOWN_OP;
+                       my_trace->width = 0;
+                       my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
+@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
+ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+                                                       void __iomem *addr)
+ {
+-      static atomic_t next_id;
++      static atomic_unchecked_t next_id;
+       struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+       /* These are page-unaligned. */
+       struct mmiotrace_map map = {
+@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+                       .private = trace
+               },
+               .phys = offset,
+-              .id = atomic_inc_return(&next_id)
++              .id = atomic_inc_return_unchecked(&next_id)
+       };
+       map.map_id = trace->id;
+@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+       ioremap_trace_core(offset, size, addr);
+ }
+-static void iounmap_trace_core(volatile void __iomem *addr)
++static void iounmap_trace_core(const volatile void __iomem *addr)
+ {
+       struct mmiotrace_map map = {
+               .phys = 0,
+@@ -328,7 +328,7 @@ not_enabled:
+       }
+ }
+-void mmiotrace_iounmap(volatile void __iomem *addr)
++void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+       might_sleep();
+       if (is_enabled()) /* recheck and proper locking in *_core() */
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index 8047687..6351be43 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -193,7 +193,7 @@ static int mpx_insn_decode(struct insn *insn,
+        */
+       if (!nr_copied)
+               return -EFAULT;
+-      insn_init(insn, buf, nr_copied, x86_64);
++      insn_init(insn, (void *)ktva_ktla((unsigned long)buf), nr_copied, x86_64);
+       insn_get_length(insn);
+       /*
+        * copy_from_user() tries to get as many bytes as we could see in
+@@ -293,11 +293,11 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
+        * We were not able to extract an address from the instruction,
+        * probably because there was something invalid in it.
+        */
+-      if (info->si_addr == (void *)-1) {
++      if (info->si_addr == (void __user *)-1) {
+               err = -EINVAL;
+               goto err_out;
+       }
+-      trace_mpx_bounds_register_exception(info->si_addr, bndreg);
++      trace_mpx_bounds_register_exception((void __force_kernel *)info->si_addr, bndreg);
+       return info;
+ err_out:
+       /* info might be NULL, but kfree() handles that */
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index fb68210..591f415 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -528,7 +528,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
+       }
+ }
+-static int __init numa_register_memblks(struct numa_meminfo *mi)
++static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
+ {
+       unsigned long uninitialized_var(pfn_align);
+       int i, nid;
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index e3353c9..2a8fbe5 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -265,7 +265,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+        */
+ #ifdef CONFIG_PCI_BIOS
+       if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+-              pgprot_val(forbidden) |= _PAGE_NX;
++              pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+ #endif
+       /*
+@@ -273,14 +273,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+        * Does not cover __inittext since that is gone later on. On
+        * 64bit we do not enforce !NX on the low mapping
+        */
+-      if (within(address, (unsigned long)_text, (unsigned long)_etext))
+-              pgprot_val(forbidden) |= _PAGE_NX;
++      if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
++              pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+       /*
+        * The .rodata section needs to be read-only. Using the pfn
+        * catches all aliases.
+        */
+-      if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
++      if (kernel_set_to_readonly && within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+                  __pa_symbol(__end_rodata) >> PAGE_SHIFT))
+               pgprot_val(forbidden) |= _PAGE_RW;
+@@ -321,6 +321,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+       }
+ #endif
++#ifdef CONFIG_PAX_KERNEXEC
++      if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)) >> PAGE_SHIFT, __pa((unsigned long)&_sdata) >> PAGE_SHIFT)) {
++              pgprot_val(forbidden) |= _PAGE_RW;
++              pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
++      }
++#endif
++
+       prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+       return prot;
+@@ -457,23 +464,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
+       /* change init_mm */
++      pax_open_kernel();
+       set_pte_atomic(kpte, pte);
++
+ #ifdef CONFIG_X86_32
+       if (!SHARED_KERNEL_PMD) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned long cpu;
++#else
+               struct page *page;
++#endif
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++                      pgd_t *pgd = get_cpu_pgd(cpu, kernel);
++#else
+               list_for_each_entry(page, &pgd_list, lru) {
+-                      pgd_t *pgd;
++                      pgd_t *pgd = (pgd_t *)page_address(page);
++#endif
++
+                       pud_t *pud;
+                       pmd_t *pmd;
+-                      pgd = (pgd_t *)page_address(page) + pgd_index(address);
++                      pgd += pgd_index(address);
+                       pud = pud_offset(pgd, address);
+                       pmd = pmd_offset(pud, address);
+                       set_pte_atomic((pte_t *)pmd, pte);
+               }
+       }
+ #endif
++      pax_close_kernel();
+ }
+ static int
+@@ -711,6 +732,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ }
+ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
++                          unsigned long address) __must_hold(&cpa_lock);
++static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
+                           unsigned long address)
+ {
+       struct page *base;
+@@ -1153,6 +1176,7 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+       }
+ }
++static int __change_page_attr(struct cpa_data *cpa, int primary) __must_hold(&cpa_lock);
+ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ {
+       unsigned long address;
+@@ -1211,7 +1235,9 @@ repeat:
+                * Do we really change anything ?
+                */
+               if (pte_val(old_pte) != pte_val(new_pte)) {
++                      pax_open_kernel();
+                       set_pte_atomic(kpte, new_pte);
++                      pax_close_kernel();
+                       cpa->flags |= CPA_FLUSHTLB;
+               }
+               cpa->numpages = 1;
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index 170cc4f..33d1874 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -632,7 +632,7 @@ int free_memtype(u64 start, u64 end)
+       if (IS_ERR(entry)) {
+               pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+-                      current->comm, current->pid, start, end - 1);
++                      current->comm, task_pid_nr(current), start, end - 1);
+               return -EINVAL;
+       }
+@@ -804,7 +804,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
+       if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
+               pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
+-                      current->comm, current->pid,
++                      current->comm, task_pid_nr(current),
+                       cattr_name(pcm),
+                       base, (unsigned long long)(base + size-1));
+               return -EINVAL;
+@@ -839,7 +839,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+               pcm = lookup_memtype(paddr);
+               if (want_pcm != pcm) {
+                       pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
+-                              current->comm, current->pid,
++                              current->comm, task_pid_nr(current),
+                               cattr_name(want_pcm),
+                               (unsigned long long)paddr,
+                               (unsigned long long)(paddr + size - 1),
+@@ -860,7 +860,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+                   !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
+                       free_memtype(paddr, paddr + size);
+                       pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
+-                             current->comm, current->pid,
++                             current->comm, task_pid_nr(current),
+                              cattr_name(want_pcm),
+                              (unsigned long long)paddr,
+                              (unsigned long long)(paddr + size - 1),
+diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
+index de391b7..532da7a 100644
+--- a/arch/x86/mm/pat_rbtree.c
++++ b/arch/x86/mm/pat_rbtree.c
+@@ -170,7 +170,7 @@ success:
+ failure:
+       pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+-              current->comm, current->pid, start, end,
++              current->comm, task_pid_nr(current), start, end,
+               cattr_name(found_type), cattr_name(match->type));
+       return -EBUSY;
+ }
+diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
+index a235869..3aa7bdd 100644
+--- a/arch/x86/mm/pf_in.c
++++ b/arch/x86/mm/pf_in.c
+@@ -147,7 +147,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
+       int i;
+       enum reason_type rv = OTHERS;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+@@ -167,7 +167,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+@@ -190,7 +190,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+@@ -414,7 +414,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+       for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
+@@ -469,7 +469,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
+       struct prefix_bits prf;
+       int i;
+-      p = (unsigned char *)ins_addr;
++      p = (unsigned char *)ktla_ktva(ins_addr);
+       p += skip_prefix(p, &prf);
+       p += get_opcode(p, &opcode);
+       for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 3feec5a..0f77f72 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -98,10 +98,75 @@ static inline void pgd_list_del(pgd_t *pgd)
+       list_del(&page->lru);
+ }
+-#define UNSHARED_PTRS_PER_PGD                         \
+-      (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++      unsigned int count = USER_PGD_PTRS;
++      if (!pax_user_shadow_base)
++              return;
++
++      while (count--)
++              *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
++}
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++      unsigned int count = USER_PGD_PTRS;
++
++      while (count--) {
++              pgd_t pgd;
++
++#ifdef CONFIG_X86_64
++              pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
++#else
++              pgd = *src++;
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++              pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
++#endif
++
++              *dst++ = pgd;
++      }
++
++}
++#endif
++
++#ifdef CONFIG_X86_64
++#define pxd_t                         pud_t
++#define pyd_t                         pgd_t
++#define paravirt_release_pxd(pfn)     paravirt_release_pud(pfn)
++#define pgtable_pxd_page_ctor(page)   true
++#define pgtable_pxd_page_dtor(page)   do {} while (0)
++#define pxd_free(mm, pud)             pud_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud)    pgd_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address)               pgd_offset((mm), (address))
++#define PYD_SIZE                      PGDIR_SIZE
++#define mm_inc_nr_pxds(mm)            do {} while (0)
++#define mm_dec_nr_pxds(mm)            do {} while (0)
++#else
++#define pxd_t                         pmd_t
++#define pyd_t                         pud_t
++#define paravirt_release_pxd(pfn)     paravirt_release_pmd(pfn)
++#define pgtable_pxd_page_ctor(page)   pgtable_pmd_page_ctor(page)
++#define pgtable_pxd_page_dtor(page)   pgtable_pmd_page_dtor(page)
++#define pxd_free(mm, pud)             pmd_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud)    pud_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address)               pud_offset((mm), (address))
++#define PYD_SIZE                      PUD_SIZE
++#define mm_inc_nr_pxds(mm)            mm_inc_nr_pmds(mm)
++#define mm_dec_nr_pxds(mm)            mm_dec_nr_pmds(mm)
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
++static inline void pgd_dtor(pgd_t *pgd) {}
++#else
+ static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+ {
+       BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+@@ -142,6 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
+       pgd_list_del(pgd);
+       spin_unlock(&pgd_lock);
+ }
++#endif
+ /*
+  * List of all pgd's needed for non-PAE so it can invalidate entries
+@@ -154,7 +220,7 @@ static void pgd_dtor(pgd_t *pgd)
+  * -- nyc
+  */
+-#ifdef CONFIG_X86_PAE
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+ /*
+  * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+  * updating the top-level pagetable entries to guarantee the
+@@ -166,7 +232,7 @@ static void pgd_dtor(pgd_t *pgd)
+  * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+  * and initialize the kernel pmds here.
+  */
+-#define PREALLOCATED_PMDS     UNSHARED_PTRS_PER_PGD
++#define PREALLOCATED_PXDS     (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+ {
+@@ -184,26 +250,28 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+        */
+       flush_tlb_mm(mm);
+ }
++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
++#define PREALLOCATED_PXDS     USER_PGD_PTRS
+ #else  /* !CONFIG_X86_PAE */
+ /* No need to prepopulate any pagetable entries in non-PAE modes. */
+-#define PREALLOCATED_PMDS     0
++#define PREALLOCATED_PXDS     0
+ #endif        /* CONFIG_X86_PAE */
+-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
++static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
+ {
+       int i;
+-      for(i = 0; i < PREALLOCATED_PMDS; i++)
+-              if (pmds[i]) {
+-                      pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
+-                      free_page((unsigned long)pmds[i]);
+-                      mm_dec_nr_pmds(mm);
++      for(i = 0; i < PREALLOCATED_PXDS; i++)
++              if (pxds[i]) {
++                      pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
++                      free_page((unsigned long)pxds[i]);
++                      mm_dec_nr_pxds(mm);
+               }
+ }
+-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
++static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
+ {
+       int i;
+       bool failed = false;
+@@ -212,22 +280,22 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+       if (mm == &init_mm)
+               gfp &= ~__GFP_ACCOUNT;
+-      for(i = 0; i < PREALLOCATED_PMDS; i++) {
+-              pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
+-              if (!pmd)
++      for(i = 0; i < PREALLOCATED_PXDS; i++) {
++              pxd_t *pxd = (pxd_t *)__get_free_page(gfp);
++              if (!pxd)
+                       failed = true;
+-              if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
+-                      free_page((unsigned long)pmd);
+-                      pmd = NULL;
++              if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
++                      free_page((unsigned long)pxd);
++                      pxd = NULL;
+                       failed = true;
+               }
+-              if (pmd)
+-                      mm_inc_nr_pmds(mm);
+-              pmds[i] = pmd;
++              if (pxd)
++                      mm_inc_nr_pxds(mm);
++              pxds[i] = pxd;
+       }
+       if (failed) {
+-              free_pmds(mm, pmds);
++              free_pxds(mm, pxds);
+               return -ENOMEM;
+       }
+@@ -240,43 +308,47 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+  * preallocate which never got a corresponding vma will need to be
+  * freed manually.
+  */
+-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
+ {
+       int i;
+-      for(i = 0; i < PREALLOCATED_PMDS; i++) {
++      for(i = 0; i < PREALLOCATED_PXDS; i++) {
+               pgd_t pgd = pgdp[i];
+               if (pgd_val(pgd) != 0) {
+-                      pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
++                      pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
+-                      pgdp[i] = native_make_pgd(0);
++                      set_pgd(pgdp + i, native_make_pgd(0));
+-                      paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+-                      pmd_free(mm, pmd);
+-                      mm_dec_nr_pmds(mm);
++                      paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
++                      pxd_free(mm, pxd);
++                      mm_dec_nr_pxds(mm);
+               }
+       }
+ }
+-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
+ {
+-      pud_t *pud;
++      pyd_t *pyd;
+       int i;
+-      if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
++      if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
+               return;
+-      pud = pud_offset(pgd, 0);
++#ifdef CONFIG_X86_64
++      pyd = pyd_offset(mm, 0L);
++#else
++      pyd = pyd_offset(pgd, 0L);
++#endif
+-      for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
+-              pmd_t *pmd = pmds[i];
++      for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
++              pxd_t *pxd = pxds[i];
+               if (i >= KERNEL_PGD_BOUNDARY)
+-                      memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+-                             sizeof(pmd_t) * PTRS_PER_PMD);
++                      memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
++                             sizeof(pxd_t) * PTRS_PER_PMD);
+-              pud_populate(mm, pud, pmd);
++              pyd_populate(mm, pyd, pxd);
+       }
+ }
+@@ -358,7 +430,7 @@ static inline void _pgd_free(pgd_t *pgd)
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+       pgd_t *pgd;
+-      pmd_t *pmds[PREALLOCATED_PMDS];
++      pxd_t *pxds[PREALLOCATED_PXDS];
+       pgd = _pgd_alloc();
+@@ -367,11 +439,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+       mm->pgd = pgd;
+-      if (preallocate_pmds(mm, pmds) != 0)
++      if (preallocate_pxds(mm, pxds) != 0)
+               goto out_free_pgd;
+       if (paravirt_pgd_alloc(mm) != 0)
+-              goto out_free_pmds;
++              goto out_free_pxds;
+       /*
+        * Make sure that pre-populating the pmds is atomic with
+@@ -381,14 +453,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+       spin_lock(&pgd_lock);
+       pgd_ctor(mm, pgd);
+-      pgd_prepopulate_pmd(mm, pgd, pmds);
++      pgd_prepopulate_pxd(mm, pgd, pxds);
+       spin_unlock(&pgd_lock);
+       return pgd;
+-out_free_pmds:
+-      free_pmds(mm, pmds);
++out_free_pxds:
++      free_pxds(mm, pxds);
+ out_free_pgd:
+       _pgd_free(pgd);
+ out:
+@@ -397,7 +469,7 @@ out:
+ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ {
+-      pgd_mop_up_pmds(mm, pgd);
++      pgd_mop_up_pxds(mm, pgd);
+       pgd_dtor(pgd);
+       paravirt_pgd_free(mm, pgd);
+       _pgd_free(pgd);
+@@ -530,6 +602,50 @@ void __init reserve_top_address(unsigned long reserve)
+ int fixmaps_set;
++static void fix_user_fixmap(enum fixed_addresses idx, unsigned long address)
++{
++#ifdef CONFIG_X86_64
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++
++      switch (idx) {
++      default:
++              return;
++
++#ifdef CONFIG_X86_VSYSCALL_EMULATION
++      case VSYSCALL_PAGE:
++              break;
++#endif
++      }
++
++      pgd = pgd_offset_k(address);
++      if (!(pgd_val(*pgd) & _PAGE_USER)) {
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              unsigned int cpu;
++              pgd_t *pgd_cpu;
++
++              for_each_possible_cpu(cpu) {
++                      pgd_cpu = pgd_offset_cpu(cpu, kernel, address);
++                      set_pgd(pgd_cpu, __pgd(pgd_val(*pgd_cpu) | _PAGE_USER));
++
++                      pgd_cpu = pgd_offset_cpu(cpu, user, address);
++                      set_pgd(pgd_cpu, __pgd(pgd_val(*pgd_cpu) | _PAGE_USER));
++              }
++#endif
++              set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
++      }
++
++      pud = pud_offset(pgd, address);
++      if (!(pud_val(*pud) & _PAGE_USER))
++              set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
++
++      pmd = pmd_offset(pud, address);
++      if (!(pmd_val(*pmd) & _PAGE_USER))
++              set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
++#endif
++}
++
+ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
+ {
+       unsigned long address = __fix_to_virt(idx);
+@@ -540,9 +656,10 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
+       }
+       set_pte_vaddr(address, pte);
+       fixmaps_set++;
++      fix_user_fixmap(idx, address);
+ }
+-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
++void native_set_fixmap(unsigned int idx, phys_addr_t phys,
+                      pgprot_t flags)
+ {
+       __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
+@@ -606,9 +723,11 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+       prot = pgprot_4k_2_large(prot);
++      pax_open_kernel();
+       set_pte((pte_t *)pmd, pfn_pte(
+               (u64)addr >> PAGE_SHIFT,
+               __pgprot(pgprot_val(prot) | _PAGE_PSE)));
++      pax_close_kernel();
+       return 1;
+ }
+diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
+index 9adce77..b698e8be 100644
+--- a/arch/x86/mm/pgtable_32.c
++++ b/arch/x86/mm/pgtable_32.c
+@@ -46,10 +46,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+               return;
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
++
++      pax_open_kernel();
+       if (!pte_none(pteval))
+               set_pte_at(&init_mm, vaddr, pte, pteval);
+       else
+               pte_clear(&init_mm, vaddr, pte);
++      pax_close_kernel();
+       /*
+        * It's enough to flush this one mapping.
+diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
+index f65a33f..f408a99 100644
+--- a/arch/x86/mm/setup_nx.c
++++ b/arch/x86/mm/setup_nx.c
+@@ -6,8 +6,10 @@
+ #include <asm/proto.h>
+ #include <asm/cpufeature.h>
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ static int disable_nx;
++#ifndef CONFIG_PAX_PAGEEXEC
+ /*
+  * noexec = on|off
+  *
+@@ -29,12 +31,17 @@ static int __init noexec_setup(char *str)
+       return 0;
+ }
+ early_param("noexec", noexec_setup);
++#endif
++
++#endif
+ void x86_configure_nx(void)
+ {
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+       if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+               __supported_pte_mask |= _PAGE_NX;
+       else
++#endif
+               __supported_pte_mask &= ~_PAGE_NX;
+ }
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 4dbe656..b298320 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -47,7 +47,11 @@ void leave_mm(int cpu)
+               BUG();
+       if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
+               cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+               load_cr3(swapper_pg_dir);
++#endif
++
+               /*
+                * This gets called in the idle path where RCU
+                * functions differently.  Tracing normally
+@@ -61,6 +65,51 @@ EXPORT_SYMBOL_GPL(leave_mm);
+ #endif /* CONFIG_SMP */
++static void pax_switch_mm(struct mm_struct *next, unsigned int cpu)
++{
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      pax_open_kernel();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (static_cpu_has(X86_FEATURE_PCIDUDEREF))
++              __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
++      else
++#endif
++
++              __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
++
++      __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
++
++      pax_close_kernel();
++
++      BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++      if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++              if (static_cpu_has(X86_FEATURE_INVPCID)) {
++                      u64 descriptor[2];
++                      descriptor[0] = PCID_USER;
++                      asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
++                      if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
++                              descriptor[0] = PCID_KERNEL;
++                              asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
++                      }
++              } else {
++                      write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
++                      if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
++                              write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++                      else
++                              write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
++              }
++      } else
++#endif
++
++              load_cr3(get_cpu_pgd(cpu, kernel));
++#endif
++
++}
++
+ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+              struct task_struct *tsk)
+ {
+@@ -75,9 +124,15 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                       struct task_struct *tsk)
+ {
+       unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++      int tlbstate = TLBSTATE_OK;
++#endif
+       if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++              tlbstate = this_cpu_read(cpu_tlbstate.state);
++#endif
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               this_cpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+@@ -96,7 +151,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                * We need to prevent an outcome in which CPU 1 observes
+                * the new PTE value and CPU 0 observes bit 1 clear in
+                * mm_cpumask.  (If that occurs, then the IPI will never
+-               * be sent, and CPU 0's TLB will contain a stale entry.)
++               * be sent, and CPU 1's TLB will contain a stale entry.)
+                *
+                * The bad outcome can occur if either CPU's load is
+                * reordered before that CPU's store, so both CPUs must
+@@ -111,7 +166,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                * ordering guarantee we need.
+                *
+                */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++              pax_switch_mm(next, cpu);
++#else
+               load_cr3(next->pgd);
++#endif
+               trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+@@ -137,9 +196,31 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_mm_ldt(next);
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++              if (!(__supported_pte_mask & _PAGE_NX)) {
++                      smp_mb__before_atomic();
++                      cpumask_clear_cpu(cpu, &prev->context.cpu_user_cs_mask);
++                      smp_mb__after_atomic();
++                      cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
++              }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++              if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++                           prev->context.user_cs_limit != next->context.user_cs_limit))
++                      set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#ifdef CONFIG_SMP
++              else if (unlikely(tlbstate != TLBSTATE_OK))
++                      set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
+       }
++      else {
++              pax_switch_mm(next, cpu);
++
+ #ifdef CONFIG_SMP
+-        else {
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+@@ -160,13 +241,30 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                        * As above, load_cr3() is serializing and orders TLB
+                        * fills with respect to the mm_cpumask write.
+                        */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+                       load_cr3(next->pgd);
+                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
++#endif
++
+                       load_mm_cr4(next);
+                       load_mm_ldt(next);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++                      if (!(__supported_pte_mask & _PAGE_NX))
++                              cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++                      if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++                              set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+               }
++#endif
+       }
+-#endif
+ }
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
+new file mode 100644
+index 0000000..3fda3f3
+--- /dev/null
++++ b/arch/x86/mm/uderef_64.c
+@@ -0,0 +1,37 @@
++#include <linux/mm.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++/* PaX: due to the special call convention these functions must
++ * - remain leaf functions under all configurations,
++ * - never be called directly, only dereferenced from the wrappers.
++ */
++void __used __pax_open_userland(void)
++{
++      unsigned int cpu;
++
++      if (unlikely(!segment_eq(get_fs(), USER_DS)))
++              return;
++
++      cpu = raw_get_cpu();
++      BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
++      write_cr3(__pa_nodebug(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
++      raw_put_cpu_no_resched();
++}
++EXPORT_SYMBOL(__pax_open_userland);
++
++void __used __pax_close_userland(void)
++{
++      unsigned int cpu;
++
++      if (unlikely(!segment_eq(get_fs(), USER_DS)))
++              return;
++
++      cpu = raw_get_cpu();
++      BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
++      write_cr3(__pa_nodebug(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
++      raw_put_cpu_no_resched();
++}
++EXPORT_SYMBOL(__pax_close_userland);
++#endif
+diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
+index f2a7faf..b77bb6c 100644
+--- a/arch/x86/net/bpf_jit.S
++++ b/arch/x86/net/bpf_jit.S
+@@ -9,6 +9,7 @@
+  */
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ /*
+  * Calling convention :
+@@ -39,6 +40,7 @@ FUNC(sk_load_word_positive_offset)
+       jle     bpf_slow_path_word
+       mov     (SKBDATA,%rsi),%eax
+       bswap   %eax                    /* ntohl() */
++      pax_force_retaddr
+       ret
+ FUNC(sk_load_half)
+@@ -52,6 +54,7 @@ FUNC(sk_load_half_positive_offset)
+       jle     bpf_slow_path_half
+       movzwl  (SKBDATA,%rsi),%eax
+       rol     $8,%ax                  # ntohs()
++      pax_force_retaddr
+       ret
+ FUNC(sk_load_byte)
+@@ -62,6 +65,7 @@ FUNC(sk_load_byte_positive_offset)
+       cmp     %esi,%r9d   /* if (offset >= hlen) goto bpf_slow_path_byte */
+       jle     bpf_slow_path_byte
+       movzbl  (SKBDATA,%rsi),%eax
++      pax_force_retaddr
+       ret
+ /* rsi contains offset and can be scratched */
+@@ -85,6 +89,7 @@ bpf_slow_path_word:
+       js      bpf_error
+       mov     - MAX_BPF_STACK + 32(%rbp),%eax
+       bswap   %eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_half:
+@@ -93,12 +98,14 @@ bpf_slow_path_half:
+       mov     - MAX_BPF_STACK + 32(%rbp),%ax
+       rol     $8,%ax
+       movzwl  %ax,%eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_byte:
+       bpf_slow_path_common(1)
+       js      bpf_error
+       movzbl  - MAX_BPF_STACK + 32(%rbp),%eax
++      pax_force_retaddr
+       ret
+ #define sk_negative_common(SIZE)                              \
+@@ -123,6 +130,7 @@ FUNC(sk_load_word_negative_offset)
+       sk_negative_common(4)
+       mov     (%rax), %eax
+       bswap   %eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_half_neg:
+@@ -134,6 +142,7 @@ FUNC(sk_load_half_negative_offset)
+       mov     (%rax),%ax
+       rol     $8,%ax
+       movzwl  %ax,%eax
++      pax_force_retaddr
+       ret
+ bpf_slow_path_byte_neg:
+@@ -143,6 +152,7 @@ bpf_slow_path_byte_neg:
+ FUNC(sk_load_byte_negative_offset)
+       sk_negative_common(1)
+       movzbl  (%rax), %eax
++      pax_force_retaddr
+       ret
+ bpf_error:
+@@ -153,4 +163,5 @@ bpf_error:
+       mov     - MAX_BPF_STACK + 16(%rbp),%r14
+       mov     - MAX_BPF_STACK + 24(%rbp),%r15
+       leaveq
++      pax_force_retaddr
+       ret
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index fe04a04..99be1fa 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -14,7 +14,11 @@
+ #include <asm/cacheflush.h>
+ #include <linux/bpf.h>
++#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
++int bpf_jit_enable __read_only;
++#else
+ int bpf_jit_enable __read_mostly;
++#endif
+ /*
+  * assembly code in arch/x86/net/bpf_jit.S
+@@ -183,7 +187,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+ static void jit_fill_hole(void *area, unsigned int size)
+ {
+       /* fill whole space with int3 instructions */
++      pax_open_kernel();
+       memset(area, 0xcc, size);
++      pax_close_kernel();
+ }
+ struct jit_context {
+@@ -1076,7 +1082,9 @@ common_load:
+                               pr_err("bpf_jit_compile fatal error\n");
+                               return -EFAULT;
+                       }
++                      pax_open_kernel();
+                       memcpy(image + proglen, temp, ilen);
++                      pax_close_kernel();
+               }
+               proglen += ilen;
+               addrs[i] = proglen;
+@@ -1169,7 +1177,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+       if (image) {
+               bpf_flush_icache(header, image + proglen);
+-              set_memory_ro((unsigned long)header, header->pages);
+               prog->bpf_func = (void *)image;
+               prog->jited = 1;
+       }
+@@ -1188,12 +1195,8 @@ void bpf_jit_free(struct bpf_prog *fp)
+       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+       struct bpf_binary_header *header = (void *)addr;
+-      if (!fp->jited)
+-              goto free_filter;
++      if (fp->jited)
++              bpf_jit_binary_free(header);
+-      set_memory_rw(addr, header->pages);
+-      bpf_jit_binary_free(header);
+-
+-free_filter:
+       bpf_prog_unlock_free(fp);
+ }
+diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
+index cb31a44..b942435 100644
+--- a/arch/x86/oprofile/backtrace.c
++++ b/arch/x86/oprofile/backtrace.c
+@@ -47,11 +47,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
+       struct stack_frame_ia32 *fp;
+       unsigned long bytes;
+-      bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++      bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+       if (bytes != 0)
+               return NULL;
+-      fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
++      fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
+       oprofile_add_trace(bufhead[0].return_address);
+@@ -93,7 +93,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+       struct stack_frame bufhead[2];
+       unsigned long bytes;
+-      bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++      bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+       if (bytes != 0)
+               return NULL;
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 28c0412..568d0a4 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -23,6 +23,7 @@
+ #include <asm/nmi.h>
+ #include <asm/msr.h>
+ #include <asm/apic.h>
++#include <asm/pgtable.h>
+ #include "op_counter.h"
+ #include "op_x86_model.h"
+@@ -615,7 +616,7 @@ enum __force_cpu_type {
+ static int force_cpu_type;
+-static int set_cpu_type(const char *str, struct kernel_param *kp)
++static int set_cpu_type(const char *str, const struct kernel_param *kp)
+ {
+       if (!strcmp(str, "timer")) {
+               force_cpu_type = timer;
+@@ -786,8 +787,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
+       if (ret)
+               return ret;
+-      if (!model->num_virt_counters)
+-              model->num_virt_counters = model->num_counters;
++      if (!model->num_virt_counters) {
++              pax_open_kernel();
++              const_cast(model->num_virt_counters) = model->num_counters;
++              pax_close_kernel();
++      }
+       mux_init(ops);
+diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
+index 660a83c..6ff762b 100644
+--- a/arch/x86/oprofile/op_model_amd.c
++++ b/arch/x86/oprofile/op_model_amd.c
+@@ -518,9 +518,11 @@ static int op_amd_init(struct oprofile_operations *ops)
+               num_counters = AMD64_NUM_COUNTERS;
+       }
+-      op_amd_spec.num_counters = num_counters;
+-      op_amd_spec.num_controls = num_counters;
+-      op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
++      pax_open_kernel();
++      const_cast(op_amd_spec.num_counters) = num_counters;
++      const_cast(op_amd_spec.num_controls) = num_counters;
++      const_cast(op_amd_spec.num_virt_counters) = max(num_counters, NUM_VIRT_COUNTERS);
++      pax_close_kernel();
+       return 0;
+ }
+diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
+index 350f709..77882e0 100644
+--- a/arch/x86/oprofile/op_model_ppro.c
++++ b/arch/x86/oprofile/op_model_ppro.c
+@@ -19,6 +19,7 @@
+ #include <asm/msr.h>
+ #include <asm/apic.h>
+ #include <asm/nmi.h>
++#include <asm/pgtable.h>
+ #include "op_x86_model.h"
+ #include "op_counter.h"
+@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
+       num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
+-      op_arch_perfmon_spec.num_counters = num_counters;
+-      op_arch_perfmon_spec.num_controls = num_counters;
++      pax_open_kernel();
++      const_cast(op_arch_perfmon_spec.num_counters) = num_counters;
++      const_cast(op_arch_perfmon_spec.num_controls) = num_counters;
++      pax_close_kernel();
+ }
+ static int arch_perfmon_init(struct oprofile_operations *ignore)
+diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
+index 71e8a67..6a313bb 100644
+--- a/arch/x86/oprofile/op_x86_model.h
++++ b/arch/x86/oprofile/op_x86_model.h
+@@ -52,7 +52,7 @@ struct op_x86_model_spec {
+       void            (*switch_ctrl)(struct op_x86_model_spec const *model,
+                                      struct op_msrs const * const msrs);
+ #endif
+-};
++} __do_const;
+ struct op_counter_config;
+diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
+index 5a18aed..22eac20 100644
+--- a/arch/x86/pci/intel_mid_pci.c
++++ b/arch/x86/pci/intel_mid_pci.c
+@@ -288,7 +288,7 @@ int __init intel_mid_pci_init(void)
+       pci_mmcfg_late_init();
+       pcibios_enable_irq = intel_mid_pci_irq_enable;
+       pcibios_disable_irq = intel_mid_pci_irq_disable;
+-      pci_root_ops = intel_mid_pci_ops;
++      memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
+       pci_soc_mode = 1;
+       /* Continue with standard init */
+       return 1;
+diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
+index 9bd1154..e9d4656 100644
+--- a/arch/x86/pci/irq.c
++++ b/arch/x86/pci/irq.c
+@@ -51,7 +51,7 @@ struct irq_router {
+ struct irq_router_handler {
+       u16 vendor;
+       int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
+-};
++} __do_const;
+ int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
+ void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
+@@ -792,7 +792,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
+       return 0;
+ }
+-static __initdata struct irq_router_handler pirq_routers[] = {
++static __initconst const struct irq_router_handler pirq_routers[] = {
+       { PCI_VENDOR_ID_INTEL, intel_router_probe },
+       { PCI_VENDOR_ID_AL, ali_router_probe },
+       { PCI_VENDOR_ID_ITE, ite_router_probe },
+@@ -819,7 +819,7 @@ static struct pci_dev *pirq_router_dev;
+ static void __init pirq_find_router(struct irq_router *r)
+ {
+       struct irq_routing_table *rt = pirq_table;
+-      struct irq_router_handler *h;
++      const struct irq_router_handler *h;
+ #ifdef CONFIG_PCI_BIOS
+       if (!rt->signature) {
+@@ -1092,7 +1092,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
+       {
+               .callback = fix_broken_hp_bios_irq9,
+               .ident = "HP Pavilion N5400 Series Laptop",
+diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
+index 9770e55..76067ec 100644
+--- a/arch/x86/pci/pcbios.c
++++ b/arch/x86/pci/pcbios.c
+@@ -79,7 +79,7 @@ union bios32 {
+ static struct {
+       unsigned long address;
+       unsigned short segment;
+-} bios32_indirect __initdata = { 0, __KERNEL_CS };
++} bios32_indirect __initdata = { 0, __PCIBIOS_CS };
+ /*
+  * Returns the entry point for the given service, NULL on error
+@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
+       unsigned long length;           /* %ecx */
+       unsigned long entry;            /* %edx */
+       unsigned long flags;
++      struct desc_struct d, *gdt;
+       local_irq_save(flags);
+-      __asm__("lcall *(%%edi); cld"
++
++      gdt = get_cpu_gdt_table(smp_processor_id());
++
++      pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++      pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++      __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+               : "=a" (return_code),
+                 "=b" (address),
+                 "=c" (length),
+                 "=d" (entry)
+               : "0" (service),
+                 "1" (0),
+-                "D" (&bios32_indirect));
++                "D" (&bios32_indirect),
++                "r"(__PCIBIOS_DS)
++              : "memory");
++
++      pax_open_kernel();
++      gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++      gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++      gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++      gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++      pax_close_kernel();
++
+       local_irq_restore(flags);
+       switch (return_code) {
+-              case 0:
+-                      return address + entry;
+-              case 0x80:      /* Not present */
+-                      printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+-                      return 0;
+-              default: /* Shouldn't happen */
+-                      printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+-                              service, return_code);
++      case 0: {
++              int cpu;
++              unsigned char flags;
++
++              printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++              if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++                      printk(KERN_WARNING "bios32_service: not valid\n");
+                       return 0;
++              }
++              address = address + PAGE_OFFSET;
++              length += 16UL; /* some BIOSs underreport this... */
++              flags = 4;
++              if (length >= 64*1024*1024) {
++                      length >>= PAGE_SHIFT;
++                      flags |= 8;
++              }
++
++              for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++                      gdt = get_cpu_gdt_table(cpu);
++                      pack_descriptor(&d, address, length, 0x9b, flags);
++                      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++                      pack_descriptor(&d, address, length, 0x93, flags);
++                      write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++              }
++              return entry;
++      }
++      case 0x80:      /* Not present */
++              printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++              return 0;
++      default: /* Shouldn't happen */
++              printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++                      service, return_code);
++              return 0;
+       }
+ }
+ static struct {
+       unsigned long address;
+       unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+ static int __init check_pcibios(void)
+ {
+@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
+       unsigned long flags, pcibios_entry;
+       if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+-              pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++              pci_indirect.address = pcibios_entry;
+               local_irq_save(flags);
+-              __asm__(
+-                      "lcall *(%%edi); cld\n\t"
++              __asm__("movw %w6, %%ds\n\t"
++                      "lcall *%%ss:(%%edi); cld\n\t"
++                      "push %%ss\n\t"
++                      "pop %%ds\n\t"
+                       "jc 1f\n\t"
+                       "xor %%ah, %%ah\n"
+                       "1:"
+@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
+                         "=b" (ebx),
+                         "=c" (ecx)
+                       : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+-                        "D" (&pci_indirect)
++                        "D" (&pci_indirect),
++                        "r" (__PCIBIOS_DS)
+                       : "memory");
+               local_irq_restore(flags);
+@@ -202,7 +248,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+               break;
+       }
+-      __asm__("lcall *(%%esi); cld\n\t"
++      __asm__("movw %w6, %%ds\n\t"
++              "lcall *%%ss:(%%esi); cld\n\t"
++              "push %%ss\n\t"
++              "pop %%ds\n\t"
+               "jc 1f\n\t"
+               "xor %%ah, %%ah\n"
+               "1:"
+@@ -211,7 +260,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+               : "1" (number),
+                 "b" (bx),
+                 "D" ((long)reg),
+-                "S" (&pci_indirect));
++                "S" (&pci_indirect),
++                "r" (__PCIBIOS_DS));
+       /*
+        * Zero-extend the result beyond 8 or 16 bits, do not trust the
+        * BIOS having done it:
+@@ -250,7 +300,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+               break;
+       }
+-      __asm__("lcall *(%%esi); cld\n\t"
++      __asm__("movw %w6, %%ds\n\t"
++              "lcall *%%ss:(%%esi); cld\n\t"
++              "push %%ss\n\t"
++              "pop %%ds\n\t"
+               "jc 1f\n\t"
+               "xor %%ah, %%ah\n"
+               "1:"
+@@ -259,7 +312,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+                 "c" (value),
+                 "b" (bx),
+                 "D" ((long)reg),
+-                "S" (&pci_indirect));
++                "S" (&pci_indirect),
++                "r" (__PCIBIOS_DS));
+       raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+@@ -362,10 +416,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
+       DBG("PCI: Fetching IRQ routing table... ");
+       __asm__("push %%es\n\t"
++              "movw %w8, %%ds\n\t"
+               "push %%ds\n\t"
+               "pop  %%es\n\t"
+-              "lcall *(%%esi); cld\n\t"
++              "lcall *%%ss:(%%esi); cld\n\t"
+               "pop %%es\n\t"
++              "push %%ss\n\t"
++              "pop %%ds\n"
+               "jc 1f\n\t"
+               "xor %%ah, %%ah\n"
+               "1:"
+@@ -376,7 +433,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
+                 "1" (0),
+                 "D" ((long) &opt),
+                 "S" (&pci_indirect),
+-                "m" (opt)
++                "m" (opt),
++                "r" (__PCIBIOS_DS)
+               : "memory");
+       DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+       if (ret & 0xff00)
+@@ -400,7 +458,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+ {
+       int ret;
+-      __asm__("lcall *(%%esi); cld\n\t"
++      __asm__("movw %w5, %%ds\n\t"
++              "lcall *%%ss:(%%esi); cld\n\t"
++              "push %%ss\n\t"
++              "pop %%ds\n"
+               "jc 1f\n\t"
+               "xor %%ah, %%ah\n"
+               "1:"
+@@ -408,7 +469,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+               : "0" (PCIBIOS_SET_PCI_HW_INT),
+                 "b" ((dev->bus->number << 8) | dev->devfn),
+                 "c" ((irq << 8) | (pin + 10)),
+-                "S" (&pci_indirect));
++                "S" (&pci_indirect),
++                "r" (__PCIBIOS_DS));
+       return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
+diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
+index 7948be3..73a1aaa 100644
+--- a/arch/x86/pci/vmd.c
++++ b/arch/x86/pci/vmd.c
+@@ -389,7 +389,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
+ #define ASSIGN_VMD_DMA_OPS(source, dest, fn)  \
+       do {                                    \
+               if (source->fn)                 \
+-                      dest->fn = vmd_##fn;    \
++                      const_cast(dest->fn) = vmd_##fn;        \
+       } while (0)
+ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
+@@ -403,6 +403,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
+       if (!source)
+               return;
++      pax_open_kernel();
+       ASSIGN_VMD_DMA_OPS(source, dest, alloc);
+       ASSIGN_VMD_DMA_OPS(source, dest, free);
+       ASSIGN_VMD_DMA_OPS(source, dest, mmap);
+@@ -420,6 +421,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
+ #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+       ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
+ #endif
++      pax_close_kernel();
+       add_dma_domain(domain);
+ }
+ #undef ASSIGN_VMD_DMA_OPS
+diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
+index cef39b0..0e5aebe 100644
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -63,11 +63,27 @@ pgd_t * __init efi_call_phys_prolog(void)
+       struct desc_ptr gdt_descr;
+       pgd_t *save_pgd;
++#ifdef CONFIG_PAX_KERNEXEC
++      struct desc_struct d;
++#endif
++
+       /* Current pgd is swapper_pg_dir, we'll restore it later: */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      save_pgd = get_cpu_pgd(smp_processor_id(), kernel);
++#else
+       save_pgd = swapper_pg_dir;
++#endif
++
+       load_cr3(initial_page_table);
+       __flush_tlb_all();
++#ifdef CONFIG_PAX_KERNEXEC
++      pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++      pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+       gdt_descr.address = __pa(get_cpu_gdt_table(0));
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
+@@ -79,6 +95,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
+ {
+       struct desc_ptr gdt_descr;
++#ifdef CONFIG_PAX_KERNEXEC
++      struct desc_struct d;
++
++      memset(&d, 0, sizeof d);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++      write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+       gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 8dd3784..9846546 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -92,6 +92,11 @@ pgd_t * __init efi_call_phys_prolog(void)
+               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+       }
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(swapper_pg_dir);
++#endif
++
+ out:
+       __flush_tlb_all();
+@@ -119,6 +124,10 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
+       kfree(save_pgd);
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
++#endif
++
+       __flush_tlb_all();
+       early_code_mapping_set_exec(0);
+ }
+@@ -219,8 +228,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+       unsigned npages;
+       pgd_t *pgd;
+-      if (efi_enabled(EFI_OLD_MEMMAP))
++      if (efi_enabled(EFI_OLD_MEMMAP)) {
++              /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
++               * able to execute the EFI services.
++               */
++              if (__supported_pte_mask & _PAGE_NX) {
++                      unsigned long addr = (unsigned long) __va(0);
++                      pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) &  ~_PAGE_NX);
++
++                      pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
++#ifdef CONFIG_PAX_PER_CPU_PGD
++                      set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
++#endif
++                      set_pgd(pgd_offset_k(addr), pe);
++              }
++
+               return 0;
++      }
+       efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+       pgd = efi_pgd;
+diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
+index 040192b..7d3300f 100644
+--- a/arch/x86/platform/efi/efi_stub_32.S
++++ b/arch/x86/platform/efi/efi_stub_32.S
+@@ -6,7 +6,9 @@
+  */
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+ /*
+  * efi_call_phys(void *, ...) is a function with variable parameters.
+@@ -20,7 +22,7 @@
+  * service functions will comply with gcc calling convention, too.
+  */
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+       /*
+        * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
+        * The mapping of lower virtual memory has been created in prolog and
+        * epilog.
+        */
+-      movl    $1f, %edx
+-      subl    $__PAGE_OFFSET, %edx
+-      jmp     *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++      movl    $(__KERNEXEC_EFI_DS), %edx
++      mov     %edx, %ds
++      mov     %edx, %es
++      mov     %edx, %ss
++      addl    $2f,(1f)
++      ljmp    *(1f)
++
++__INITDATA
++1:    .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
++.previous
++
++2:
++      subl    $2b,(1b)
++#else
++      jmp     1f-__PAGE_OFFSET
+ 1:
++#endif
+       /*
+        * 2. Now on the top of stack is the return
+@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
+        * parameter 2, ..., param n. To make things easy, we save the return
+        * address of efi_call_phys in a global variable.
+        */
+-      popl    %edx
+-      movl    %edx, saved_return_addr
+-      /* get the function pointer into ECX*/
+-      popl    %ecx
+-      movl    %ecx, efi_rt_function_ptr
+-      movl    $2f, %edx
+-      subl    $__PAGE_OFFSET, %edx
+-      pushl   %edx
++      popl    (saved_return_addr)
++      popl    (efi_rt_function_ptr)
+       /*
+        * 3. Clear PG bit in %CR0.
+@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
+       /*
+        * 5. Call the physical function.
+        */
+-      jmp     *%ecx
++      call    *(efi_rt_function_ptr-__PAGE_OFFSET)
+-2:
+       /*
+        * 6. After EFI runtime service returns, control will return to
+        * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
+       movl    %cr0, %edx
+       orl     $0x80000000, %edx
+       movl    %edx, %cr0
+-      jmp     1f
+-1:
++
+       /*
+        * 8. Now restore the virtual mode from flat mode by
+        * adding EIP with PAGE_OFFSET.
+        */
+-      movl    $1f, %edx
+-      jmp     *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++      movl    $(__KERNEL_DS), %edx
++      mov     %edx, %ds
++      mov     %edx, %es
++      mov     %edx, %ss
++      ljmp    $(__KERNEL_CS),$1f
++#else
++      jmp     1f+__PAGE_OFFSET
++#endif
+ 1:
+       /*
+        * 9. Balance the stack. And because EAX contain the return value,
+        * we'd better not clobber it.
+        */
+-      leal    efi_rt_function_ptr, %edx
+-      movl    (%edx), %ecx
+-      pushl   %ecx
++      pushl   (efi_rt_function_ptr)
+       /*
+-       * 10. Push the saved return address onto the stack and return.
++       * 10. Return to the saved return address.
+        */
+-      leal    saved_return_addr, %edx
+-      movl    (%edx), %ecx
+-      pushl   %ecx
+-      ret
++      jmpl    *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+-.data
++__INITDATA
+ saved_return_addr:
+       .long 0
+ efi_rt_function_ptr:
+diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
+index cd95075..a7b6d47 100644
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -11,6 +11,7 @@
+ #include <asm/msr.h>
+ #include <asm/processor-flags.h>
+ #include <asm/page_types.h>
++#include <asm/alternative-asm.h>
+ #define SAVE_XMM                      \
+       mov %rsp, %rax;                 \
+@@ -53,5 +54,6 @@ ENTRY(efi_call)
+       addq $48, %rsp
+       RESTORE_XMM
+       popq %rbp
++      pax_force_retaddr 0, 1
+       ret
+ ENDPROC(efi_call)
+diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
+index ce119d2..42159d9 100644
+--- a/arch/x86/platform/intel-mid/intel-mid.c
++++ b/arch/x86/platform/intel-mid/intel-mid.c
+@@ -62,9 +62,9 @@
+ enum intel_mid_timer_options intel_mid_timer_options;
+ /* intel_mid_ops to store sub arch ops */
+-static struct intel_mid_ops *intel_mid_ops;
++static const struct intel_mid_ops *intel_mid_ops;
+ /* getter function for sub arch ops*/
+-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
++static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
+ enum intel_mid_cpu_type __intel_mid_cpu_chip;
+ EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
+@@ -72,9 +72,10 @@ static void intel_mid_power_off(void)
+ {
+ };
+-static void intel_mid_reboot(void)
++static void __noreturn intel_mid_reboot(void)
+ {
+       intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
++      BUG();
+ }
+ static unsigned long __init intel_mid_calibrate_tsc(void)
+diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+index 3c1c386..59a68ed 100644
+--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
++++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+@@ -13,6 +13,6 @@
+ /* For every CPU addition a new get_<cpuname>_ops interface needs
+  * to be added.
+  */
+-extern void *get_penwell_ops(void);
+-extern void *get_cloverview_ops(void);
+-extern void *get_tangier_ops(void);
++extern const void *get_penwell_ops(void);
++extern const void *get_cloverview_ops(void);
++extern const void *get_tangier_ops(void);
+diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
+index 1eb47b6..dadfb57 100644
+--- a/arch/x86/platform/intel-mid/mfld.c
++++ b/arch/x86/platform/intel-mid/mfld.c
+@@ -61,12 +61,12 @@ static void __init penwell_arch_setup(void)
+       pm_power_off = mfld_power_off;
+ }
+-void *get_penwell_ops(void)
++const void *get_penwell_ops(void)
+ {
+       return &penwell_ops;
+ }
+-void *get_cloverview_ops(void)
++const void *get_cloverview_ops(void)
+ {
+       return &penwell_ops;
+ }
+diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
+index 59253db..81bb534 100644
+--- a/arch/x86/platform/intel-mid/mrfld.c
++++ b/arch/x86/platform/intel-mid/mrfld.c
+@@ -94,7 +94,7 @@ static struct intel_mid_ops tangier_ops = {
+       .arch_setup = tangier_arch_setup,
+ };
+-void *get_tangier_ops(void)
++const void *get_tangier_ops(void)
+ {
+       return &tangier_ops;
+ }
+diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
+index f5bad40..da1428a 100644
+--- a/arch/x86/platform/intel-quark/imr_selftest.c
++++ b/arch/x86/platform/intel-quark/imr_selftest.c
+@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
+  */
+ static void __init imr_self_test(void)
+ {
+-      phys_addr_t base  = virt_to_phys(&_text);
++      phys_addr_t base  = virt_to_phys((void *)ktla_ktva((unsigned long)_text));
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+       int ret;
+diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
+index d6ee929..0454327 100644
+--- a/arch/x86/platform/olpc/olpc_dt.c
++++ b/arch/x86/platform/olpc/olpc_dt.c
+@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
+       return res;
+ }
+-static struct of_pdt_ops prom_olpc_ops __initdata = {
++static const struct of_pdt_ops prom_olpc_ops __initconst = {
+       .nextprop = olpc_dt_nextprop,
+       .getproplen = olpc_dt_getproplen,
+       .getproperty = olpc_dt_getproperty,
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index b12c26e..089a429 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -160,11 +160,8 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+       int cpu = smp_processor_id();
+-      struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+-#ifdef CONFIG_X86_64
+-      struct desc_struct *desc = get_cpu_gdt_table(cpu);
+-      tss_desc tss;
+-#endif
++      struct tss_struct *t = cpu_tss + cpu;
++
+       set_tss_desc(cpu, t);   /*
+                                * This just modifies memory; should not be
+                                * necessary. But... This is necessary, because
+@@ -173,10 +170,6 @@ static void fix_processor_context(void)
+                                */
+ #ifdef CONFIG_X86_64
+-      memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
+-      tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
+-      write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
+-
+       syscall_init();                         /* This sets MSR_*STAR and related */
+ #endif
+       load_TR_desc();                         /* This does ltr */
+diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
+index 1d0fa0e..5003de0 100644
+--- a/arch/x86/power/hibernate_asm_32.S
++++ b/arch/x86/power/hibernate_asm_32.S
+@@ -11,6 +11,7 @@
+ #include <asm/page_types.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/processor-flags.h>
++#include <asm/smap.h>
+ .text
+@@ -74,6 +75,7 @@ done:
+       pushl saved_context_eflags
+       popfl
++      ASM_CLAC
+       /* Saved in save_processor_state. */
+       movl $saved_context, %eax
+diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
+index ce8da3a..c3c8b8c 100644
+--- a/arch/x86/power/hibernate_asm_64.S
++++ b/arch/x86/power/hibernate_asm_64.S
+@@ -22,6 +22,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/processor-flags.h>
+ #include <asm/frame.h>
++#include <asm/smap.h>
+ ENTRY(swsusp_arch_suspend)
+       movq    $saved_context, %rax
+@@ -133,6 +134,7 @@ ENTRY(restore_registers)
+       movq    pt_regs_r15(%rax), %r15
+       pushq   pt_regs_flags(%rax)
+       popfq
++      ASM_CLAC
+       /* Saved in save_processor_state. */
+       lgdt    saved_context_gdt_desc(%rax)
+diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
+index 5db706f1..267f907 100644
+--- a/arch/x86/realmode/init.c
++++ b/arch/x86/realmode/init.c
+@@ -85,7 +85,13 @@ static void __init setup_real_mode(void)
+               __va(real_mode_header->trampoline_header);
+ #ifdef CONFIG_X86_32
+-      trampoline_header->start = __pa_symbol(startup_32_smp);
++      trampoline_header->start = __pa_symbol(ktla_ktva((unsigned long)startup_32_smp));
++
++#ifdef CONFIG_PAX_KERNEXEC
++      trampoline_header->start -= LOAD_PHYSICAL_ADDR;
++#endif
++
++      trampoline_header->boot_cs = __BOOT_CS;
+       trampoline_header->gdt_limit = __BOOT_DS + 7;
+       trampoline_header->gdt_base = __pa_symbol(boot_gdt);
+ #else
+@@ -101,7 +107,7 @@ static void __init setup_real_mode(void)
+       *trampoline_cr4_features = mmu_cr4_features;
+       trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+-      trampoline_pgd[0] = trampoline_pgd_entry.pgd;
++      trampoline_pgd[0] = trampoline_pgd_entry.pgd & ~_PAGE_NX;
+       trampoline_pgd[511] = init_level4_pgt[511].pgd;
+ #endif
+ }
+diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
+index a28221d..93c40f1 100644
+--- a/arch/x86/realmode/rm/header.S
++++ b/arch/x86/realmode/rm/header.S
+@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
+ #endif
+       /* APM/BIOS reboot */
+       .long   pa_machine_real_restart_asm
+-#ifdef CONFIG_X86_64
++#ifdef CONFIG_X86_32
++      .long   __KERNEL_CS
++#else
+       .long   __KERNEL32_CS
+ #endif
+ END(real_mode_header)
+diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
+index d66c607..3def845 100644
+--- a/arch/x86/realmode/rm/reboot.S
++++ b/arch/x86/realmode/rm/reboot.S
+@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
+       lgdtl   pa_tr_gdt
+       /* Disable paging to drop us out of long mode */
++      movl    %cr4, %eax
++      andl    $~X86_CR4_PCIDE, %eax
++      movl    %eax, %cr4
++
+       movl    %cr0, %eax
+       andl    $~X86_CR0_PG, %eax
+       movl    %eax, %cr0
+diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
+index 48ddd76..c26749f 100644
+--- a/arch/x86/realmode/rm/trampoline_32.S
++++ b/arch/x86/realmode/rm/trampoline_32.S
+@@ -24,6 +24,12 @@
+ #include <asm/page_types.h>
+ #include "realmode.h"
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) (pa_ ## X)
++#endif
++
+       .text
+       .code16
+@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
+       cli                     # We should be safe anyway
+-      movl    tr_start, %eax  # where we need to go
+-
+       movl    $0xA5A5A5A5, trampoline_status
+                               # write marker for master knows we're running
+@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
+       movw    $1, %dx                 # protected mode (PE) bit
+       lmsw    %dx                     # into protected mode
+-      ljmpl   $__BOOT_CS, $pa_startup_32
++      ljmpl *(trampoline_header)
+       .section ".text32","ax"
+       .code32
+@@ -66,7 +70,7 @@ ENTRY(startup_32)                    # note: also used from wakeup_asm.S
+       .balign 8
+ GLOBAL(trampoline_header)
+       tr_start:               .space  4
+-      tr_gdt_pad:             .space  2
++      tr_boot_cs:             .space  2
+       tr_gdt:                 .space  6
+ END(trampoline_header)
+       
+diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
+index dac7b20..72dbaca 100644
+--- a/arch/x86/realmode/rm/trampoline_64.S
++++ b/arch/x86/realmode/rm/trampoline_64.S
+@@ -93,6 +93,7 @@ ENTRY(startup_32)
+       movl    %edx, %gs
+       movl    pa_tr_cr4, %eax
++      andl    $~X86_CR4_PCIDE, %eax
+       movl    %eax, %cr4              # Enable PAE mode
+       # Setup trampoline 4 level pagetables
+@@ -106,7 +107,7 @@ ENTRY(startup_32)
+       wrmsr
+       # Enable paging and in turn activate Long Mode
+-      movl    $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
++      movl    $(X86_CR0_PG | X86_CR0_PE), %eax
+       movl    %eax, %cr0
+       /*
+diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
+index 9e7e147..25a4158 100644
+--- a/arch/x86/realmode/rm/wakeup_asm.S
++++ b/arch/x86/realmode/rm/wakeup_asm.S
+@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
+       lgdtl   pmode_gdt
+       /* This really couldn't... */
+-      movl    pmode_entry, %eax
+       movl    pmode_cr0, %ecx
+       movl    %ecx, %cr0
+-      ljmpl   $__KERNEL_CS, $pa_startup_32
+-      /* -> jmp *%eax in trampoline_32.S */
++
++      ljmpl   *pmode_entry
+ #else
+       jmp     trampoline_start
+ #endif
+diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
+index 604a37e..e49702a 100644
+--- a/arch/x86/tools/Makefile
++++ b/arch/x86/tools/Makefile
+@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
+ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
+ hostprogs-y   += relocs
+ relocs-objs     := relocs_32.o relocs_64.o relocs_common.o
+ PHONY += relocs
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index 0c2fae8..1d2a079 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -1,5 +1,7 @@
+ /* This is included from relocs_32/64.c */
++#include "../../../include/generated/autoconf.h"
++
+ #define ElfW(type)            _ElfW(ELF_BITS, type)
+ #define _ElfW(bits, type)     __ElfW(bits, type)
+ #define __ElfW(bits, type)    Elf##bits##_##type
+@@ -11,6 +13,7 @@
+ #define Elf_Sym                       ElfW(Sym)
+ static Elf_Ehdr ehdr;
++static Elf_Phdr *phdr;
+ struct relocs {
+       uint32_t        *offset;
+@@ -45,6 +48,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
+       "^(xen_irq_disable_direct_reloc$|"
+       "xen_save_fl_direct_reloc$|"
+       "VDSO|"
++      "__rap_hash_|"
+       "__crc_)",
+ /*
+@@ -386,9 +390,39 @@ static void read_ehdr(FILE *fp)
+       }
+ }
++static void read_phdrs(FILE *fp)
++{
++      unsigned int i;
++
++      phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
++      if (!phdr) {
++              die("Unable to allocate %d program headers\n",
++                  ehdr.e_phnum);
++      }
++      if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++              die("Seek to %d failed: %s\n",
++                      ehdr.e_phoff, strerror(errno));
++      }
++      if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++              die("Cannot read ELF program headers: %s\n",
++                      strerror(errno));
++      }
++      for(i = 0; i < ehdr.e_phnum; i++) {
++              phdr[i].p_type      = elf_word_to_cpu(phdr[i].p_type);
++              phdr[i].p_offset    = elf_off_to_cpu(phdr[i].p_offset);
++              phdr[i].p_vaddr     = elf_addr_to_cpu(phdr[i].p_vaddr);
++              phdr[i].p_paddr     = elf_addr_to_cpu(phdr[i].p_paddr);
++              phdr[i].p_filesz    = elf_word_to_cpu(phdr[i].p_filesz);
++              phdr[i].p_memsz     = elf_word_to_cpu(phdr[i].p_memsz);
++              phdr[i].p_flags     = elf_word_to_cpu(phdr[i].p_flags);
++              phdr[i].p_align     = elf_word_to_cpu(phdr[i].p_align);
++      }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+-      int i;
++      unsigned int i;
+       Elf_Shdr shdr;
+       secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -423,7 +457,7 @@ static void read_shdrs(FILE *fp)
+ static void read_strtabs(FILE *fp)
+ {
+-      int i;
++      unsigned int i;
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -448,7 +482,7 @@ static void read_strtabs(FILE *fp)
+ static void read_symtabs(FILE *fp)
+ {
+-      int i,j;
++      unsigned int i,j;
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -479,9 +513,11 @@ static void read_symtabs(FILE *fp)
+ }
+-static void read_relocs(FILE *fp)
++static void read_relocs(FILE *fp, int use_real_mode)
+ {
+-      int i,j;
++      unsigned int i,j;
++      uint32_t base;
++
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_REL_TYPE) {
+@@ -501,9 +537,22 @@ static void read_relocs(FILE *fp)
+                       die("Cannot read symbol table: %s\n",
+                               strerror(errno));
+               }
++              base = 0;
++
++#ifdef CONFIG_X86_32
++              for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
++                      if (phdr[j].p_type != PT_LOAD )
++                              continue;
++                      if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++                              continue;
++                      base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++                      break;
++              }
++#endif
++
+               for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+                       Elf_Rel *rel = &sec->reltab[j];
+-                      rel->r_offset = elf_addr_to_cpu(rel->r_offset);
++                      rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
+                       rel->r_info   = elf_xword_to_cpu(rel->r_info);
+ #if (SHT_REL_TYPE == SHT_RELA)
+                       rel->r_addend = elf_xword_to_cpu(rel->r_addend);
+@@ -515,7 +564,7 @@ static void read_relocs(FILE *fp)
+ static void print_absolute_symbols(void)
+ {
+-      int i;
++      unsigned int i;
+       const char *format;
+       if (ELF_BITS == 64)
+@@ -528,7 +577,7 @@ static void print_absolute_symbols(void)
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               struct section *sec = &secs[i];
+               char *sym_strtab;
+-              int j;
++              unsigned int j;
+               if (sec->shdr.sh_type != SHT_SYMTAB) {
+                       continue;
+@@ -555,7 +604,7 @@ static void print_absolute_symbols(void)
+ static void print_absolute_relocs(void)
+ {
+-      int i, printed = 0;
++      unsigned int i, printed = 0;
+       const char *format;
+       if (ELF_BITS == 64)
+@@ -568,7 +617,7 @@ static void print_absolute_relocs(void)
+               struct section *sec_applies, *sec_symtab;
+               char *sym_strtab;
+               Elf_Sym *sh_symtab;
+-              int j;
++              unsigned int j;
+               if (sec->shdr.sh_type != SHT_REL_TYPE) {
+                       continue;
+               }
+@@ -645,13 +694,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
+ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+                       Elf_Sym *sym, const char *symname))
+ {
+-      int i;
++      unsigned int i;
+       /* Walk through the relocations */
+       for (i = 0; i < ehdr.e_shnum; i++) {
+               char *sym_strtab;
+               Elf_Sym *sh_symtab;
+               struct section *sec_applies, *sec_symtab;
+-              int j;
++              unsigned int j;
+               struct section *sec = &secs[i];
+               if (sec->shdr.sh_type != SHT_REL_TYPE) {
+@@ -697,7 +746,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+  * kernel data and does not require special treatment.
+  *
+  */
+-static int per_cpu_shndx      = -1;
++static unsigned int per_cpu_shndx = ~0;
+ static Elf_Addr per_cpu_load_addr;
+ static void percpu_init(void)
+@@ -830,6 +879,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+ {
+       unsigned r_type = ELF32_R_TYPE(rel->r_info);
+       int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
++      char *sym_strtab = sec->link->link->strtab;
++
++      /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++      if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++              return 0;
++
++#ifdef CONFIG_PAX_KERNEXEC
++      /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++      if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++              return 0;
++      if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++              return 0;
++      if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++              return 0;
++      if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++              return 0;
++#endif
+       switch (r_type) {
+       case R_386_NONE:
+@@ -968,7 +1034,7 @@ static int write32_as_text(uint32_t v, FILE *f)
+ static void emit_relocs(int as_text, int use_real_mode)
+ {
+-      int i;
++      unsigned int i;
+       int (*write_reloc)(uint32_t, FILE *) = write32;
+       int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+                       const char *symname);
+@@ -1078,10 +1144,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
+ {
+       regex_init(use_real_mode);
+       read_ehdr(fp);
++      read_phdrs(fp);
+       read_shdrs(fp);
+       read_strtabs(fp);
+       read_symtabs(fp);
+-      read_relocs(fp);
++      read_relocs(fp, use_real_mode);
+       if (ELF_BITS == 64)
+               percpu_init();
+       if (show_absolute_syms) {
+diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
+index 744afdc..a0b8a0d 100644
+--- a/arch/x86/um/mem_32.c
++++ b/arch/x86/um/mem_32.c
+@@ -20,7 +20,7 @@ static int __init gate_vma_init(void)
+       gate_vma.vm_start = FIXADDR_USER_START;
+       gate_vma.vm_end = FIXADDR_USER_END;
+       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+-      gate_vma.vm_page_prot = __P101;
++      gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+       return 0;
+ }
+diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
+index 48e3858..ab4458c 100644
+--- a/arch/x86/um/tls_32.c
++++ b/arch/x86/um/tls_32.c
+@@ -261,7 +261,7 @@ out:
+       if (unlikely(task == current &&
+                    !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+               printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+-                              "without flushed TLS.", current->pid);
++                              "without flushed TLS.", task_pid_nr(current));
+       }
+       return 0;
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index c7b15f3..cc09a65 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -10,6 +10,7 @@ config XEN
+       select XEN_HAVE_VPMU
+       depends on X86_64 || (X86_32 && X86_PAE)
+       depends on X86_LOCAL_APIC && X86_TSC
++      depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
+       help
+         This is the Linux Xen port.  Enabling this will allow the
+         kernel to boot in a paravirtualized environment under the
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index b86ebb1..e8a6e63 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -134,8 +134,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+ struct shared_info xen_dummy_shared_info;
+-void *xen_initial_gdt;
+-
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+ __read_mostly int xen_have_vector_callback;
+ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+@@ -594,8 +592,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+       unsigned long va = dtr->address;
+       unsigned int size = dtr->size + 1;
+-      unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
+-      unsigned long frames[pages];
++      unsigned long frames[65536 / PAGE_SIZE];
+       int f;
+       /*
+@@ -643,8 +640,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+ {
+       unsigned long va = dtr->address;
+       unsigned int size = dtr->size + 1;
+-      unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
+-      unsigned long frames[pages];
++      unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
+       int f;
+       /*
+@@ -652,7 +648,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+        * 8-byte entries, or 16 4k pages..
+        */
+-      BUG_ON(size > 65536);
++      BUG_ON(size > GDT_SIZE);
+       BUG_ON(va & ~PAGE_MASK);
+       for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+@@ -781,7 +777,7 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
+        * so we should never see them.  Warn if
+        * there's an unexpected IST-using fault handler.
+        */
+-      if (addr == (unsigned long)debug)
++      if (addr == (unsigned long)int1)
+               addr = (unsigned long)xen_debug;
+       else if (addr == (unsigned long)int3)
+               addr = (unsigned long)xen_int3;
+@@ -1290,7 +1286,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+       .end_context_switch = xen_end_context_switch,
+ };
+-static void xen_reboot(int reason)
++static __noreturn void xen_reboot(int reason)
+ {
+       struct sched_shutdown r = { .reason = reason };
+       int cpu;
+@@ -1298,26 +1294,26 @@ static void xen_reboot(int reason)
+       for_each_online_cpu(cpu)
+               xen_pmu_finish(cpu);
+-      if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
+-              BUG();
++      HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
++      BUG();
+ }
+-static void xen_restart(char *msg)
++static __noreturn void xen_restart(char *msg)
+ {
+       xen_reboot(SHUTDOWN_reboot);
+ }
+-static void xen_emergency_restart(void)
++static __noreturn void xen_emergency_restart(void)
+ {
+       xen_reboot(SHUTDOWN_reboot);
+ }
+-static void xen_machine_halt(void)
++static __noreturn void xen_machine_halt(void)
+ {
+       xen_reboot(SHUTDOWN_poweroff);
+ }
+-static void xen_machine_power_off(void)
++static __noreturn void xen_machine_power_off(void)
+ {
+       if (pm_power_off)
+               pm_power_off();
+@@ -1471,8 +1467,11 @@ static void __ref xen_setup_gdt(int cpu)
+       pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
+       pv_cpu_ops.load_gdt = xen_load_gdt_boot;
+-      setup_stack_canary_segment(0);
+-      switch_to_new_gdt(0);
++      setup_stack_canary_segment(cpu);
++#ifdef CONFIG_X86_64
++      load_percpu_segment(cpu);
++#endif
++      switch_to_new_gdt(cpu);
+       pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
+       pv_cpu_ops.load_gdt = xen_load_gdt;
+@@ -1590,9 +1589,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+        */
+       __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+-      /* Work out if we support NX */
+-      x86_configure_nx();
+-
+       /* Get mfn list */
+       xen_build_dynamic_phys_to_machine();
+@@ -1602,6 +1598,19 @@ asmlinkage __visible void __init xen_start_kernel(void)
+        */
+       xen_setup_gdt(0);
++      /* Work out if we support NX */
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++      if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
++          (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
++              unsigned l, h;
++
++              __supported_pte_mask |= _PAGE_NX;
++              rdmsr(MSR_EFER, l, h);
++              l |= EFER_NX;
++              wrmsr(MSR_EFER, l, h);
++      }
++#endif
++
+       xen_init_irq_ops();
+       xen_init_cpuid_mask();
+@@ -1619,13 +1628,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+       machine_ops = xen_machine_ops;
+-      /*
+-       * The only reliable way to retain the initial address of the
+-       * percpu gdt_page is to remember it here, so we can go and
+-       * mark it RW later, when the initial percpu area is freed.
+-       */
+-      xen_initial_gdt = &per_cpu(gdt_page, 0);
+-
+       xen_smp_init();
+ #ifdef CONFIG_ACPI_NUMA
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 7d5afdb..c89588c 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1940,7 +1940,14 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+                * L3_k[511] -> level2_fixmap_pgt */
+               convert_pfn_mfn(level3_kernel_pgt);
++              convert_pfn_mfn(level3_vmalloc_start_pgt[0]);
++              convert_pfn_mfn(level3_vmalloc_start_pgt[1]);
++              convert_pfn_mfn(level3_vmalloc_start_pgt[2]);
++              convert_pfn_mfn(level3_vmalloc_start_pgt[3]);
++              convert_pfn_mfn(level3_vmalloc_end_pgt);
++              convert_pfn_mfn(level3_vmemmap_pgt);
+               /* L3_k[511][506] -> level1_fixmap_pgt */
++              /* L3_k[511][507] -> level1_vsyscall_pgt */
+               convert_pfn_mfn(level2_fixmap_pgt);
+       }
+       /* We get [511][511] and have Xen's version of level2_kernel_pgt */
+@@ -1970,11 +1977,25 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+               set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++              set_page_prot(level3_vmalloc_start_pgt[0], PAGE_KERNEL_RO);
++              set_page_prot(level3_vmalloc_start_pgt[1], PAGE_KERNEL_RO);
++              set_page_prot(level3_vmalloc_start_pgt[2], PAGE_KERNEL_RO);
++              set_page_prot(level3_vmalloc_start_pgt[3], PAGE_KERNEL_RO);
++              set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
++              set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+               set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
++              set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+-              set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
++              set_page_prot(level1_modules_pgt[0], PAGE_KERNEL_RO);
++              set_page_prot(level1_modules_pgt[1], PAGE_KERNEL_RO);
++              set_page_prot(level1_modules_pgt[2], PAGE_KERNEL_RO);
++              set_page_prot(level1_modules_pgt[3], PAGE_KERNEL_RO);
++              set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
++              set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
++              set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
++              set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
+               /* Pin down new L4 */
+               pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+@@ -2385,6 +2406,7 @@ static void __init xen_post_allocator_init(void)
+       pv_mmu_ops.set_pud = xen_set_pud;
+ #if CONFIG_PGTABLE_LEVELS == 4
+       pv_mmu_ops.set_pgd = xen_set_pgd;
++      pv_mmu_ops.set_pgd_batched = xen_set_pgd;
+ #endif
+       /* This will work as long as patching hasn't happened yet
+@@ -2414,6 +2436,10 @@ static void xen_leave_lazy_mmu(void)
+       preempt_enable();
+ }
++static void xen_pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++}
++
+ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+       .read_cr2 = xen_read_cr2,
+       .write_cr2 = xen_write_cr2,
+@@ -2426,7 +2452,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+       .flush_tlb_single = xen_flush_tlb_single,
+       .flush_tlb_others = xen_flush_tlb_others,
+-      .pte_update = paravirt_nop,
++      .pte_update = xen_pte_update,
+       .pgd_alloc = xen_pgd_alloc,
+       .pgd_free = xen_pgd_free,
+@@ -2463,6 +2489,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+       .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+       .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+       .set_pgd = xen_set_pgd_hyper,
++      .set_pgd_batched = xen_set_pgd_hyper,
+       .alloc_pud = xen_alloc_pmd_init,
+       .release_pud = xen_release_pmd_init,
+diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
+index 32bdc2c..073b8a5 100644
+--- a/arch/x86/xen/pmu.c
++++ b/arch/x86/xen/pmu.c
+@@ -444,6 +444,7 @@ static unsigned long xen_get_guest_ip(void)
+               return 0;
+       }
++      // TODO: adjust with the segment base
+       return xenpmu_data->pmu.r.regs.ip;
+ }
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 6228403..2354210 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -312,17 +312,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
+       if (xen_pv_domain()) {
+               if (!xen_feature(XENFEAT_writable_page_tables))
+-                      /* We've switched to the "real" per-cpu gdt, so make
+-                       * sure the old memory can be recycled. */
+-                      make_lowmem_page_readwrite(xen_initial_gdt);
+-
+ #ifdef CONFIG_X86_32
+               /*
+                * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+                * expects __USER_DS
+                */
+-              loadsegment(ds, __USER_DS);
+-              loadsegment(es, __USER_DS);
++              loadsegment(ds, __KERNEL_DS);
++              loadsegment(es, __KERNEL_DS);
+ #endif
+               xen_filter_cpu_maps();
+@@ -412,7 +408,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+ #ifdef CONFIG_X86_32
+       /* Note: PVH is not yet supported on x86_32. */
+       ctxt->user_regs.fs = __KERNEL_PERCPU;
+-      ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
++      savesegment(gs, ctxt->user_regs.gs);
+ #endif
+       memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+@@ -420,8 +416,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+               ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+               ctxt->flags = VGCF_IN_KERNEL;
+               ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+-              ctxt->user_regs.ds = __USER_DS;
+-              ctxt->user_regs.es = __USER_DS;
++              ctxt->user_regs.ds = __KERNEL_DS;
++              ctxt->user_regs.es = __KERNEL_DS;
+               ctxt->user_regs.ss = __KERNEL_DS;
+               xen_copy_trap_info(ctxt->trap_ctxt);
+@@ -763,7 +759,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
+ void __init xen_smp_init(void)
+ {
+-      smp_ops = xen_smp_ops;
++      memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
+       xen_fill_possible_map();
+ }
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index feb6d40..e59382c 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -85,7 +85,7 @@ ENTRY(xen_iret)
+       pushw %fs
+       movl $(__KERNEL_PERCPU), %eax
+       movl %eax, %fs
+-      movl %fs:xen_vcpu, %eax
++      mov PER_CPU_VAR(xen_vcpu), %eax
+       POP_FS
+ #else
+       movl %ss:xen_vcpu, %eax
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 7f8d8ab..8ecf53e 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -50,6 +50,18 @@ ENTRY(startup_xen)
+       mov %_ASM_SI, xen_start_info
+       mov $init_thread_union+THREAD_SIZE, %_ASM_SP
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
++      movl $cpu_gdt_table,%edi
++      movl $__per_cpu_load,%eax
++      movw %ax,__KERNEL_PERCPU + 2(%edi)
++      rorl $16,%eax
++      movb %al,__KERNEL_PERCPU + 4(%edi)
++      movb %ah,__KERNEL_PERCPU + 7(%edi)
++      movl $__per_cpu_end - 1,%eax
++      subl $__per_cpu_start,%eax
++      movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
++
+       jmp xen_start_kernel
+       __FINIT
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 3cbce3b..f1221bc 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -16,8 +16,6 @@ void xen_syscall_target(void);
+ void xen_syscall32_target(void);
+ #endif
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
+index 525bd3d..ef888b1 100644
+--- a/arch/xtensa/variants/dc232b/include/variant/core.h
++++ b/arch/xtensa/variants/dc232b/include/variant/core.h
+@@ -119,9 +119,9 @@
+   ----------------------------------------------------------------------*/
+ #define XCHAL_ICACHE_LINESIZE         32      /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE         32      /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH                5       /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH                5       /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE         (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)   /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_SIZE             16384   /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE             16384   /* D-cache size in bytes or 0 */
+diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
+index 2f33760..835e50a 100644
+--- a/arch/xtensa/variants/fsf/include/variant/core.h
++++ b/arch/xtensa/variants/fsf/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_H
+ #define _XTENSA_CORE_H
++#include <linux/const.h>
+ /****************************************************************************
+           Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -112,9 +113,9 @@
+   ----------------------------------------------------------------------*/
+ #define XCHAL_ICACHE_LINESIZE         16      /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE         16      /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH                4       /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH                4       /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE         (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_SIZE             8192    /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE             8192    /* D-cache size in bytes or 0 */
+diff --git a/block/bio.c b/block/bio.c
+index aa73540..ced15ee 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1144,7 +1144,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+               /*
+                * Overflow, abort
+                */
+-              if (end < start)
++              if (end < start || end - start > INT_MAX - nr_pages)
+                       return ERR_PTR(-EINVAL);
+               nr_pages += end - start;
+@@ -1269,7 +1269,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+               /*
+                * Overflow, abort
+                */
+-              if (end < start)
++              if (end < start || end - start > INT_MAX - nr_pages)
+                       return ERR_PTR(-EINVAL);
+               nr_pages += end - start;
+@@ -1777,7 +1777,7 @@ EXPORT_SYMBOL(bio_endio);
+  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+  * @bio is not freed before the split.
+  */
+-struct bio *bio_split(struct bio *bio, int sectors,
++struct bio *bio_split(struct bio *bio, unsigned int sectors,
+                     gfp_t gfp, struct bio_set *bs)
+ {
+       struct bio *split = NULL;
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index b08ccbb..87fe492 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -561,10 +561,10 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+       for (i = 0; i < BLKG_RWSTAT_NR; i++)
+               seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+-                         (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
++                         (unsigned long long)atomic64_read_unchecked(&rwstat->aux_cnt[i]));
+-      v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
+-              atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
++      v = atomic64_read_unchecked(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
++              atomic64_read_unchecked(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
+       seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+       return v;
+ }
+@@ -716,7 +716,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
+               else
+                       stat = (void *)blkg + off;
+-              sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
++              sum += blkg_stat_read(stat) + atomic64_read_unchecked(&stat->aux_cnt);
+       }
+       rcu_read_unlock();
+@@ -760,7 +760,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
+                       rwstat = (void *)pos_blkg + off;
+               for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-                      atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
++                      atomic64_add_unchecked(atomic64_read_unchecked(&rwstat->aux_cnt[i]) +
+                               percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
+                               &sum.aux_cnt[i]);
+       }
+@@ -886,13 +886,13 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
+               rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
+                                       offsetof(struct blkcg_gq, stat_bytes));
+-              rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
+-              wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
++              rbytes = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
++              wbytes = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+               rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
+                                       offsetof(struct blkcg_gq, stat_ios));
+-              rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
+-              wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
++              rios = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
++              wios = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+               spin_unlock_irq(blkg->q->queue_lock);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 36c7ac3..ba1f2fd 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -3529,8 +3529,11 @@ int __init blk_dev_init(void)
+       if (!kblockd_workqueue)
+               panic("Failed to create kblockd\n");
+-      request_cachep = kmem_cache_create("blkdev_requests",
+-                      sizeof(struct request), 0, SLAB_PANIC, NULL);
++      request_cachep = kmem_cache_create_usercopy("blkdev_requests",
++                      sizeof(struct request), 0, SLAB_PANIC,
++                      offsetof(struct request, __cmd),
++                      sizeof(((struct request *)0)->__cmd),
++                      NULL);
+       blk_requestq_cachep = kmem_cache_create("request_queue",
+                       sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+diff --git a/block/blk-map.c b/block/blk-map.c
+index b8657fa..dad7c1e 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -219,7 +219,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+       if (!len || !kbuf)
+               return -EINVAL;
+-      do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
++      do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
+       if (do_copy)
+               bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+       else
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 53b1737..08177d2e 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+  * Softirq action handler - move entries to local list and loop over them
+  * while passing them to the queue registered handler.
+  */
+-static void blk_done_softirq(struct softirq_action *h)
++static __latent_entropy void blk_done_softirq(void)
+ {
+       struct list_head *cpu_list, local_list;
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 5e24d88..c345d62 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1965,8 +1965,8 @@ static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
+ {
+       struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
+                                       offsetof(struct blkcg_gq, stat_bytes));
+-      u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+-              atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
++      u64 sum = atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
++              atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+       return __blkg_prfill_u64(sf, pd, sum >> 9);
+ }
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index 556826a..4e7c5fd 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
+       cgc = compat_alloc_user_space(sizeof(*cgc));
+       cgc32 = compat_ptr(arg);
+-      if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
++      if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
+           get_user(data, &cgc32->buffer) ||
+           put_user(compat_ptr(data), &cgc->buffer) ||
+           copy_in_user(&cgc->buflen, &cgc32->buflen,
+@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
+               err |= __get_user(f->spec1, &uf->spec1);
+               err |= __get_user(f->fmt_gap, &uf->fmt_gap);
+               err |= __get_user(name, &uf->name);
+-              f->name = compat_ptr(name);
++              f->name = (void __force_kernel *)compat_ptr(name);
+               if (err) {
+                       err = -EFAULT;
+                       goto out;
+diff --git a/block/genhd.c b/block/genhd.c
+index fcd6d4f..96e433b 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -471,21 +471,24 @@ static char *bdevt_str(dev_t devt, char *buf)
+ /*
+  * Register device numbers dev..(dev+range-1)
+- * range must be nonzero
++ * Noop if @range is zero.
+  * The hash chain is sorted on range, so that subranges can override.
+  */
+ void blk_register_region(dev_t devt, unsigned long range, struct module *module,
+                        struct kobject *(*probe)(dev_t, int *, void *),
+                        int (*lock)(dev_t, void *), void *data)
+ {
+-      kobj_map(bdev_map, devt, range, module, probe, lock, data);
++      if (range)
++              kobj_map(bdev_map, devt, range, module, probe, lock, data);
+ }
+ EXPORT_SYMBOL(blk_register_region);
++/* undo blk_register_region(), noop if @range is zero */
+ void blk_unregister_region(dev_t devt, unsigned long range)
+ {
+-      kobj_unmap(bdev_map, devt, range);
++      if (range)
++              kobj_unmap(bdev_map, devt, range);
+ }
+ EXPORT_SYMBOL(blk_unregister_region);
+diff --git a/block/partitions/efi.c b/block/partitions/efi.c
+index bcd86e5..fe457ef 100644
+--- a/block/partitions/efi.c
++++ b/block/partitions/efi.c
+@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+       if (!gpt)
+               return NULL;
++      if (!le32_to_cpu(gpt->num_partition_entries))
++              return NULL;
++      pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
++      if (!pte)
++              return NULL;
++
+       count = le32_to_cpu(gpt->num_partition_entries) *
+                 le32_to_cpu(gpt->sizeof_partition_entry);
+-      if (!count)
+-              return NULL;
+-      pte = kmalloc(count, GFP_KERNEL);
+-      if (!pte)
+-              return NULL;
+-
+       if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+                       (u8 *) pte, count) < count) {
+               kfree(pte);
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 0774799..a0012ea 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
+       return put_user(0, p);
+ }
+-static int sg_get_timeout(struct request_queue *q)
++static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
+ {
+       return jiffies_to_clock_t(q->sg_timeout);
+ }
+diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
+index 058c8d7..55229dd 100644
+--- a/crypto/cast6_generic.c
++++ b/crypto/cast6_generic.c
+@@ -181,8 +181,9 @@ static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
+       block[2] ^= F1(block[3], Kr[0], Km[0]);
+ }
+-void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
++void __cast6_encrypt(void *_c, u8 *outbuf, const u8 *inbuf)
+ {
++      struct cast6_ctx *c = _c;
+       const __be32 *src = (const __be32 *)inbuf;
+       __be32 *dst = (__be32 *)outbuf;
+       u32 block[4];
+@@ -219,8 +220,9 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
+       __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
+ }
+-void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
++void __cast6_decrypt(void *_c, u8 *outbuf, const u8 *inbuf)
+ {
++      struct cast6_ctx *c = _c;
+       const __be32 *src = (const __be32 *)inbuf;
+       __be32 *dst = (__be32 *)outbuf;
+       u32 block[4];
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 0c654e5..cf01e3e 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -65,7 +65,7 @@ struct cryptd_blkcipher_ctx {
+ struct cryptd_blkcipher_request_ctx {
+       crypto_completion_t complete;
+-};
++} __no_const;
+ struct cryptd_hash_ctx {
+       atomic_t refcnt;
+@@ -84,7 +84,7 @@ struct cryptd_aead_ctx {
+ struct cryptd_aead_request_ctx {
+       crypto_completion_t complete;
+-};
++} __no_const;
+ static void cryptd_queue_worker(struct work_struct *work);
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 1c57054..e029935 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -490,7 +490,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+                       dump_alloc += CRYPTO_REPORT_MAXSIZE;
+               {
+-                      struct netlink_dump_control c = {
++                      netlink_dump_control_no_const c = {
+                               .dump = link->dump,
+                               .done = link->done,
+                               .min_dump_alloc = dump_alloc,
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index ee9cfb9..30b36ed 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -392,7 +392,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+       int ret;
+       pinst->kobj.kset = pcrypt_kset;
+-      ret = kobject_add(&pinst->kobj, NULL, name);
++      ret = kobject_add(&pinst->kobj, NULL, "%s", name);
+       if (!ret)
+               kobject_uevent(&pinst->kobj, KOBJ_ADD);
+diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+index f550b5d..8488beb 100644
+--- a/crypto/salsa20_generic.c
++++ b/crypto/salsa20_generic.c
+@@ -104,7 +104,7 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
+ static const char sigma[16] = "expand 32-byte k";
+ static const char tau[16] = "expand 16-byte k";
+-static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
++static void __salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
+ {
+       const char *constants;
+@@ -128,7 +128,7 @@ static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
+       ctx->input[15] = U8TO32_LITTLE(constants + 12);
+ }
+-static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
++static void __salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
+ {
+       ctx->input[6] = U8TO32_LITTLE(iv + 0);
+       ctx->input[7] = U8TO32_LITTLE(iv + 4);
+@@ -136,7 +136,7 @@ static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
+       ctx->input[9] = 0;
+ }
+-static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
++static void __salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
+                                 const u8 *src, unsigned int bytes)
+ {
+       u8 buf[64];
+@@ -170,7 +170,7 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key,
+                 unsigned int keysize)
+ {
+       struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
+-      salsa20_keysetup(ctx, key, keysize);
++      __salsa20_keysetup(ctx, key, keysize);
+       return 0;
+ }
+@@ -186,24 +186,24 @@ static int encrypt(struct blkcipher_desc *desc,
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 64);
+-      salsa20_ivsetup(ctx, walk.iv);
++      __salsa20_ivsetup(ctx, walk.iv);
+       if (likely(walk.nbytes == nbytes))
+       {
+-              salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
++              __salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+                                     walk.src.virt.addr, nbytes);
+               return blkcipher_walk_done(desc, &walk, 0);
+       }
+       while (walk.nbytes >= 64) {
+-              salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
++              __salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+                                     walk.src.virt.addr,
+                                     walk.nbytes - (walk.nbytes % 64));
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
+       }
+       if (walk.nbytes) {
+-              salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
++              __salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+                                     walk.src.virt.addr, walk.nbytes);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index 52ce17a..fc10b38 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -62,14 +62,20 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ {
+       struct scatter_walk walk;
+       struct scatterlist tmp[2];
++      void *realbuf = buf;
+       if (!nbytes)
+               return;
+       sg = scatterwalk_ffwd(tmp, sg, start);
+-      if (sg_page(sg) == virt_to_page(buf) &&
+-          sg->offset == offset_in_page(buf))
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      if (object_starts_on_stack(buf))
++              realbuf = buf - current->stack + current->lowmem_stack;
++#endif
++
++      if (sg_page(sg) == virt_to_page(realbuf) &&
++          sg->offset == offset_in_page(realbuf))
+               return;
+       scatterwalk_start(&walk, sg);
+diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
+index 94970a7..f0c8d26 100644
+--- a/crypto/serpent_generic.c
++++ b/crypto/serpent_generic.c
+@@ -442,8 +442,9 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
+ }
+ EXPORT_SYMBOL_GPL(serpent_setkey);
+-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
++void __serpent_encrypt(void *_ctx, u8 *dst, const u8 *src)
+ {
++      struct serpent_ctx *ctx = _ctx;
+       const u32 *k = ctx->expkey;
+       const __le32 *s = (const __le32 *)src;
+       __le32  *d = (__le32 *)dst;
+@@ -507,8 +508,9 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+       __serpent_encrypt(ctx, dst, src);
+ }
+-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
++void __serpent_decrypt(void *_ctx, u8 *dst, const u8 *src)
+ {
++      struct serpent_ctx *ctx = _ctx;
+       const u32 *k = ctx->expkey;
+       const __le32 *s = (const __le32 *)src;
+       __le32  *d = (__le32 *)dst;
+diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
+index f71b756..b96847c 100644
+--- a/drivers/acpi/ac.c
++++ b/drivers/acpi/ac.c
+@@ -70,7 +70,7 @@ static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
++extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+ #endif
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index c5557d0..8ece624 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -406,7 +406,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id)
+       return 0;
+ }
+-static struct dmi_system_id video_dmi_table[] = {
++static const struct dmi_system_id video_dmi_table[] = {
+       /*
+        * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+        */
+diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
+index a7dbb2b..93e986e 100644
+--- a/drivers/acpi/acpica/acutils.h
++++ b/drivers/acpi/acpica/acutils.h
+@@ -274,7 +274,7 @@ void acpi_ut_init_stack_ptr_trace(void);
+ void acpi_ut_track_stack_ptr(void);
+-void
++__nocapture(2) void
+ acpi_ut_trace(u32 line_number,
+             const char *function_name,
+             const char *module_name, u32 component_id);
+diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
+index 46bd65d..ec9da48 100644
+--- a/drivers/acpi/acpica/dbhistry.c
++++ b/drivers/acpi/acpica/dbhistry.c
+@@ -155,7 +155,7 @@ void acpi_db_display_history(void)
+       for (i = 0; i < acpi_gbl_num_history; i++) {
+               if (acpi_gbl_history_buffer[history_index].command) {
+-                      acpi_os_printf("%3ld %s\n",
++                      acpi_os_printf("%3u %s\n",
+                                      acpi_gbl_history_buffer[history_index].
+                                      cmd_num,
+                                      acpi_gbl_history_buffer[history_index].
+diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
+index 7cd5d2e..a837ce6 100644
+--- a/drivers/acpi/acpica/dbinput.c
++++ b/drivers/acpi/acpica/dbinput.c
+@@ -606,7 +606,7 @@ static u32 acpi_db_get_line(char *input_buffer)
+           (acpi_gbl_db_parsed_buf, sizeof(acpi_gbl_db_parsed_buf),
+            input_buffer)) {
+               acpi_os_printf
+-                  ("Buffer overflow while parsing input line (max %u characters)\n",
++                  ("Buffer overflow while parsing input line (max %lu characters)\n",
+                    sizeof(acpi_gbl_db_parsed_buf));
+               return (0);
+       }
+@@ -862,24 +862,24 @@ acpi_db_command_dispatch(char *input_buffer,
+               if (param_count == 0) {
+                       acpi_os_printf
+-                          ("Current debug level for file output is:    %8.8lX\n",
++                          ("Current debug level for file output is:    %8.8X\n",
+                            acpi_gbl_db_debug_level);
+                       acpi_os_printf
+-                          ("Current debug level for console output is: %8.8lX\n",
++                          ("Current debug level for console output is: %8.8X\n",
+                            acpi_gbl_db_console_debug_level);
+               } else if (param_count == 2) {
+                       temp = acpi_gbl_db_console_debug_level;
+                       acpi_gbl_db_console_debug_level =
+                           strtoul(acpi_gbl_db_args[1], NULL, 16);
+                       acpi_os_printf
+-                          ("Debug Level for console output was %8.8lX, now %8.8lX\n",
++                          ("Debug Level for console output was %8.8X, now %8.8X\n",
+                            temp, acpi_gbl_db_console_debug_level);
+               } else {
+                       temp = acpi_gbl_db_debug_level;
+                       acpi_gbl_db_debug_level =
+                           strtoul(acpi_gbl_db_args[1], NULL, 16);
+                       acpi_os_printf
+-                          ("Debug Level for file output was %8.8lX, now %8.8lX\n",
++                          ("Debug Level for file output was %8.8X, now %8.8X\n",
+                            temp, acpi_gbl_db_debug_level);
+               }
+               break;
+diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c
+index a414e1f..de70230 100644
+--- a/drivers/acpi/acpica/dbstats.c
++++ b/drivers/acpi/acpica/dbstats.c
+@@ -377,17 +377,17 @@ acpi_status acpi_db_display_statistics(char *type_arg)
+                              "ACPI_TYPE", "NODES", "OBJECTS");
+               for (i = 0; i < ACPI_TYPE_NS_NODE_MAX; i++) {
+-                      acpi_os_printf("%16.16s % 10ld% 10ld\n",
++                      acpi_os_printf("%16.16s % 10d% 10d\n",
+                                      acpi_ut_get_type_name(i),
+                                      acpi_gbl_node_type_count[i],
+                                      acpi_gbl_obj_type_count[i]);
+               }
+-              acpi_os_printf("%16.16s % 10ld% 10ld\n", "Misc/Unknown",
++              acpi_os_printf("%16.16s % 10d% 10d\n", "Misc/Unknown",
+                              acpi_gbl_node_type_count_misc,
+                              acpi_gbl_obj_type_count_misc);
+-              acpi_os_printf("%16.16s % 10ld% 10ld\n", "TOTALS:",
++              acpi_os_printf("%16.16s % 10d% 10d\n", "TOTALS:",
+                              acpi_gbl_num_nodes, acpi_gbl_num_objects);
+               break;
+@@ -415,16 +415,16 @@ acpi_status acpi_db_display_statistics(char *type_arg)
+       case CMD_STAT_MISC:
+               acpi_os_printf("\nMiscellaneous Statistics:\n\n");
+-              acpi_os_printf("Calls to AcpiPsFind:.. ........% 7ld\n",
++              acpi_os_printf("Calls to AcpiPsFind:.. ........% 7u\n",
+                              acpi_gbl_ps_find_count);
+-              acpi_os_printf("Calls to AcpiNsLookup:..........% 7ld\n",
++              acpi_os_printf("Calls to AcpiNsLookup:..........% 7u\n",
+                              acpi_gbl_ns_lookup_count);
+               acpi_os_printf("\n");
+               acpi_os_printf("Mutex usage:\n\n");
+               for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+-                      acpi_os_printf("%-28s:     % 7ld\n",
++                      acpi_os_printf("%-28s:     % 7u\n",
+                                      acpi_ut_get_mutex_name(i),
+                                      acpi_gbl_mutex_info[i].use_count);
+               }
+@@ -434,87 +434,87 @@ acpi_status acpi_db_display_statistics(char *type_arg)
+               acpi_os_printf("\nInternal object sizes:\n\n");
+-              acpi_os_printf("Common         %3d\n",
++              acpi_os_printf("Common         %3lu\n",
+                              sizeof(struct acpi_object_common));
+-              acpi_os_printf("Number         %3d\n",
++              acpi_os_printf("Number         %3lu\n",
+                              sizeof(struct acpi_object_integer));
+-              acpi_os_printf("String         %3d\n",
++              acpi_os_printf("String         %3lu\n",
+                              sizeof(struct acpi_object_string));
+-              acpi_os_printf("Buffer         %3d\n",
++              acpi_os_printf("Buffer         %3lu\n",
+                              sizeof(struct acpi_object_buffer));
+-              acpi_os_printf("Package        %3d\n",
++              acpi_os_printf("Package        %3lu\n",
+                              sizeof(struct acpi_object_package));
+-              acpi_os_printf("BufferField    %3d\n",
++              acpi_os_printf("BufferField    %3lu\n",
+                              sizeof(struct acpi_object_buffer_field));
+-              acpi_os_printf("Device         %3d\n",
++              acpi_os_printf("Device         %3lu\n",
+                              sizeof(struct acpi_object_device));
+-              acpi_os_printf("Event          %3d\n",
++              acpi_os_printf("Event          %3lu\n",
+                              sizeof(struct acpi_object_event));
+-              acpi_os_printf("Method         %3d\n",
++              acpi_os_printf("Method         %3lu\n",
+                              sizeof(struct acpi_object_method));
+-              acpi_os_printf("Mutex          %3d\n",
++              acpi_os_printf("Mutex          %3lu\n",
+                              sizeof(struct acpi_object_mutex));
+-              acpi_os_printf("Region         %3d\n",
++              acpi_os_printf("Region         %3lu\n",
+                              sizeof(struct acpi_object_region));
+-              acpi_os_printf("PowerResource  %3d\n",
++              acpi_os_printf("PowerResource  %3lu\n",
+                              sizeof(struct acpi_object_power_resource));
+-              acpi_os_printf("Processor      %3d\n",
++              acpi_os_printf("Processor      %3lu\n",
+                              sizeof(struct acpi_object_processor));
+-              acpi_os_printf("ThermalZone    %3d\n",
++              acpi_os_printf("ThermalZone    %3lu\n",
+                              sizeof(struct acpi_object_thermal_zone));
+-              acpi_os_printf("RegionField    %3d\n",
++              acpi_os_printf("RegionField    %3lu\n",
+                              sizeof(struct acpi_object_region_field));
+-              acpi_os_printf("BankField      %3d\n",
++              acpi_os_printf("BankField      %3lu\n",
+                              sizeof(struct acpi_object_bank_field));
+-              acpi_os_printf("IndexField     %3d\n",
++              acpi_os_printf("IndexField     %3lu\n",
+                              sizeof(struct acpi_object_index_field));
+-              acpi_os_printf("Reference      %3d\n",
++              acpi_os_printf("Reference      %3lu\n",
+                              sizeof(struct acpi_object_reference));
+-              acpi_os_printf("Notify         %3d\n",
++              acpi_os_printf("Notify         %3lu\n",
+                              sizeof(struct acpi_object_notify_handler));
+-              acpi_os_printf("AddressSpace   %3d\n",
++              acpi_os_printf("AddressSpace   %3lu\n",
+                              sizeof(struct acpi_object_addr_handler));
+-              acpi_os_printf("Extra          %3d\n",
++              acpi_os_printf("Extra          %3lu\n",
+                              sizeof(struct acpi_object_extra));
+-              acpi_os_printf("Data           %3d\n",
++              acpi_os_printf("Data           %3lu\n",
+                              sizeof(struct acpi_object_data));
+               acpi_os_printf("\n");
+-              acpi_os_printf("ParseObject    %3d\n",
++              acpi_os_printf("ParseObject    %3lu\n",
+                              sizeof(struct acpi_parse_obj_common));
+-              acpi_os_printf("ParseObjectNamed %3d\n",
++              acpi_os_printf("ParseObjectNamed %3lu\n",
+                              sizeof(struct acpi_parse_obj_named));
+-              acpi_os_printf("ParseObjectAsl %3d\n",
++              acpi_os_printf("ParseObjectAsl %3lu\n",
+                              sizeof(struct acpi_parse_obj_asl));
+-              acpi_os_printf("OperandObject  %3d\n",
++              acpi_os_printf("OperandObject  %3lu\n",
+                              sizeof(union acpi_operand_object));
+-              acpi_os_printf("NamespaceNode  %3d\n",
++              acpi_os_printf("NamespaceNode  %3lu\n",
+                              sizeof(struct acpi_namespace_node));
+-              acpi_os_printf("AcpiObject     %3d\n",
++              acpi_os_printf("AcpiObject     %3lu\n",
+                              sizeof(union acpi_object));
+               acpi_os_printf("\n");
+-              acpi_os_printf("Generic State  %3d\n",
++              acpi_os_printf("Generic State  %3lu\n",
+                              sizeof(union acpi_generic_state));
+-              acpi_os_printf("Common State   %3d\n",
++              acpi_os_printf("Common State   %3lu\n",
+                              sizeof(struct acpi_common_state));
+-              acpi_os_printf("Control State  %3d\n",
++              acpi_os_printf("Control State  %3lu\n",
+                              sizeof(struct acpi_control_state));
+-              acpi_os_printf("Update State   %3d\n",
++              acpi_os_printf("Update State   %3lu\n",
+                              sizeof(struct acpi_update_state));
+-              acpi_os_printf("Scope State    %3d\n",
++              acpi_os_printf("Scope State    %3lu\n",
+                              sizeof(struct acpi_scope_state));
+-              acpi_os_printf("Parse Scope    %3d\n",
++              acpi_os_printf("Parse Scope    %3lu\n",
+                              sizeof(struct acpi_pscope_state));
+-              acpi_os_printf("Package State  %3d\n",
++              acpi_os_printf("Package State  %3lu\n",
+                              sizeof(struct acpi_pkg_state));
+-              acpi_os_printf("Thread State   %3d\n",
++              acpi_os_printf("Thread State   %3lu\n",
+                              sizeof(struct acpi_thread_state));
+-              acpi_os_printf("Result Values  %3d\n",
++              acpi_os_printf("Result Values  %3lu\n",
+                              sizeof(struct acpi_result_values));
+-              acpi_os_printf("Notify Info    %3d\n",
++              acpi_os_printf("Notify Info    %3lu\n",
+                              sizeof(struct acpi_notify_info));
+               break;
+diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
+index f76e0ea..4b83315 100644
+--- a/drivers/acpi/acpica/hwxfsleep.c
++++ b/drivers/acpi/acpica/hwxfsleep.c
+@@ -70,11 +70,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
+ /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
+ static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
+-      {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
+-       acpi_hw_extended_sleep},
+-      {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
+-       acpi_hw_extended_wake_prep},
+-      {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
++      {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
++       .extended_function = acpi_hw_extended_sleep},
++      {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
++       .extended_function = acpi_hw_extended_wake_prep},
++      {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
++       .extended_function = acpi_hw_extended_wake}
+ };
+ /*
+diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
+index 5744222..4ac231a 100644
+--- a/drivers/acpi/acpica/utdebug.c
++++ b/drivers/acpi/acpica/utdebug.c
+@@ -189,7 +189,7 @@ acpi_debug_print(u32 requested_debug_level,
+        * Display the module name, current line number, thread ID (if requested),
+        * current procedure nesting level, and the current procedure name
+        */
+-      acpi_os_printf("%9s-%04ld ", module_name, line_number);
++      acpi_os_printf("%9s-%04u ", module_name, line_number);
+ #ifdef ACPI_APPLICATION
+       /*
+diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
+index 6e9f14c..7f9a99d 100644
+--- a/drivers/acpi/apei/apei-internal.h
++++ b/drivers/acpi/apei/apei-internal.h
+@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type {
+       u32 flags;
+       apei_exec_ins_func_t run;
+-};
++} __do_const;
+ struct apei_exec_context {
+       u32 ip;
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 60746ef..02a1ddc 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -483,7 +483,7 @@ static void __ghes_print_estatus(const char *pfx,
+                                const struct acpi_hest_generic *generic,
+                                const struct acpi_hest_generic_status *estatus)
+ {
+-      static atomic_t seqno;
++      static atomic_unchecked_t seqno;
+       unsigned int curr_seqno;
+       char pfx_seq[64];
+@@ -494,7 +494,7 @@ static void __ghes_print_estatus(const char *pfx,
+               else
+                       pfx = KERN_ERR;
+       }
+-      curr_seqno = atomic_inc_return(&seqno);
++      curr_seqno = atomic_inc_return_unchecked(&seqno);
+       snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
+       printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+              pfx_seq, generic->header.source_id);
+@@ -544,7 +544,7 @@ static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+               cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+               if (memcmp(estatus, cache_estatus, len))
+                       continue;
+-              atomic_inc(&cache->count);
++              atomic_inc_unchecked(&cache->count);
+               now = sched_clock();
+               if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+                       cached = 1;
+@@ -578,7 +578,7 @@ static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+       cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+       memcpy(cache_estatus, estatus, len);
+       cache->estatus_len = len;
+-      atomic_set(&cache->count, 0);
++      atomic_set_unchecked(&cache->count, 0);
+       cache->generic = generic;
+       cache->time_in = sched_clock();
+       return cache;
+@@ -628,7 +628,7 @@ static void ghes_estatus_cache_add(
+                       slot_cache = cache;
+                       break;
+               }
+-              count = atomic_read(&cache->count);
++              count = atomic_read_unchecked(&cache->count);
+               period = duration;
+               do_div(period, (count + 1));
+               if (period > max_period) {
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index ab23479..9aa32bf 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -75,7 +75,7 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
++extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+ enum acpi_battery_files {
+       info_tag = 0,
+diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
+index 75f128e..0fbae68 100644
+--- a/drivers/acpi/bgrt.c
++++ b/drivers/acpi/bgrt.c
+@@ -17,40 +17,40 @@
+ static struct kobject *bgrt_kobj;
+-static ssize_t show_version(struct device *dev,
+-                          struct device_attribute *attr, char *buf)
++static ssize_t show_version(struct kobject *kobj,
++                          struct kobj_attribute *attr, char *buf)
+ {
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
+ }
+-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
++static KOBJECT_ATTR(version, S_IRUGO, show_version, NULL);
+-static ssize_t show_status(struct device *dev,
+-                         struct device_attribute *attr, char *buf)
++static ssize_t show_status(struct kobject *kobj,
++                         struct kobj_attribute *attr, char *buf)
+ {
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
+ }
+-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
++static KOBJECT_ATTR(status, S_IRUGO, show_status, NULL);
+-static ssize_t show_type(struct device *dev,
+-                       struct device_attribute *attr, char *buf)
++static ssize_t show_type(struct kobject *kobj,
++                       struct kobj_attribute *attr, char *buf)
+ {
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
+ }
+-static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
++static KOBJECT_ATTR(type, S_IRUGO, show_type, NULL);
+-static ssize_t show_xoffset(struct device *dev,
+-                          struct device_attribute *attr, char *buf)
++static ssize_t show_xoffset(struct kobject *kobj,
++                          struct kobj_attribute *attr, char *buf)
+ {
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
+ }
+-static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
++static KOBJECT_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
+-static ssize_t show_yoffset(struct device *dev,
+-                          struct device_attribute *attr, char *buf)
++static ssize_t show_yoffset(struct kobject *kobj,
++                          struct kobj_attribute *attr, char *buf)
+ {
+       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
+ }
+-static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
++static KOBJECT_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
+ static ssize_t image_read(struct file *file, struct kobject *kobj,
+              struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
+       if (!bgrt_image)
+               return -ENODEV;
+-      bin_attr_image.private = bgrt_image;
+-      bin_attr_image.size = bgrt_image_size;
++      pax_open_kernel();
++      const_cast(bin_attr_image.private) = bgrt_image;
++      const_cast(bin_attr_image.size) = bgrt_image_size;
++      pax_close_kernel();
+       bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
+       if (!bgrt_kobj)
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index bdc67ba..a82756b 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -47,13 +47,13 @@ struct acpi_blacklist_item {
+       u32 is_critical_error;
+ };
+-static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
++static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
+ /*
+  * POLICY: If *anything* doesn't work, put it on the blacklist.
+  *       If they are critical errors, mark it critical, and abort driver load.
+  */
+-static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
++static const struct acpi_blacklist_item acpi_blacklist[] __initconst = {
+       /* Compaq Presario 1700 */
+       {"PTLTD ", "  DSDT  ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
+        "Multiple problems", 1},
+@@ -144,7 +144,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
+ }
+ #endif
+-static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
++static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = {
+ #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+       /*
+        * DELL XPS 13 (2015) switches sound between HDA and I2S
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 85b7d07..cfc2a30 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -66,7 +66,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
+ }
+ #endif
+-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
++static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+       /*
+        * Invoke DSDT corruption work-around on all Toshiba Satellite.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=14679
+@@ -82,7 +82,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+       {}
+ };
+ #else
+-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
++static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+       {}
+ };
+ #endif
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index 31abb0b..462db58 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -477,7 +477,7 @@ static int acpi_button_remove(struct acpi_device *device)
+       return 0;
+ }
+-static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
++static int param_set_lid_init_state(const char *val, const struct kernel_param *kp)
+ {
+       int result = 0;
+@@ -495,7 +495,7 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
+       return result;
+ }
+-static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
++static int param_get_lid_init_state(char *buffer, const struct kernel_param *kp)
+ {
+       switch (lid_init_state) {
+       case ACPI_BUTTON_LID_INIT_OPEN:
+diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
+index c68e724..e863008 100644
+--- a/drivers/acpi/custom_method.c
++++ b/drivers/acpi/custom_method.c
+@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+       struct acpi_table_header table;
+       acpi_status status;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      return -EPERM;
++#endif
++
+       if (!(*ppos)) {
+               /* parse the table header to get the table length */
+               if (count <= sizeof(struct acpi_table_header))
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 993fd31..cc15d14 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1026,6 +1026,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
+ #endif /* CONFIG_PM_SLEEP */
++static void acpi_dev_pm_detach(struct device *dev, bool power_off);
++
+ static struct dev_pm_domain acpi_general_pm_domain = {
+       .ops = {
+               .runtime_suspend = acpi_subsys_runtime_suspend,
+@@ -1042,6 +1044,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
+               .restore_early = acpi_subsys_resume_early,
+ #endif
+       },
++      .detach = acpi_dev_pm_detach
+ };
+ /**
+@@ -1119,7 +1122,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+               acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+       }
+-      dev->pm_domain->detach = acpi_dev_pm_detach;
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index e7bd57c..e26a064 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1559,7 +1559,7 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
+       return 0;
+ }
+-static struct dmi_system_id ec_dmi_table[] __initdata = {
++static const struct dmi_system_id ec_dmi_table[] __initconst = {
+       {
+       ec_correct_ecdt, "MSI MS-171F", {
+       DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
+@@ -1619,7 +1619,7 @@ error:
+       return ret;
+ }
+-static int param_set_event_clearing(const char *val, struct kernel_param *kp)
++static int param_set_event_clearing(const char *val, const struct kernel_param *kp)
+ {
+       int result = 0;
+@@ -1637,7 +1637,7 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+       return result;
+ }
+-static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
++static int param_get_event_clearing(char *buffer, const struct kernel_param *kp)
+ {
+       switch (ec_event_clearing) {
+       case ACPI_EC_EVT_TIMING_STATUS:
+diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
+index 849f9d2..c97dd81 100644
+--- a/drivers/acpi/osi.c
++++ b/drivers/acpi/osi.c
+@@ -318,7 +318,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
+  * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
+  * by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
+  */
+-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
+       {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Fujitsu Siemens",
+diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
+index f62c68e..e90b61f 100644
+--- a/drivers/acpi/pci_slot.c
++++ b/drivers/acpi/pci_slot.c
+@@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
++static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
+       /*
+        * Fujitsu Primequest machines will return 1023 to indicate an
+        * error if the _SUN method is evaluated on SxFy objects that
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index cea5252..c688abf 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -841,7 +841,7 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+ {
+       int i, count = CPUIDLE_DRIVER_STATE_START;
+       struct acpi_processor_cx *cx;
+-      struct cpuidle_state *state;
++      cpuidle_state_no_const *state;
+       struct cpuidle_driver *drv = &acpi_idle_driver;
+       if (max_cstate == 0)
+@@ -1250,7 +1250,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
+ {
+       int i;
+       struct acpi_lpi_state *lpi;
+-      struct cpuidle_state *state;
++      cpuidle_state_no_const *state;
+       struct cpuidle_driver *drv = &acpi_idle_driver;
+       if (!pr->flags.has_lpi)
+diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
+index 7cfbda4..74f738c 100644
+--- a/drivers/acpi/processor_pdc.c
++++ b/drivers/acpi/processor_pdc.c
+@@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
+       return 0;
+ }
+-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
++static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
+       {
+       set_no_mwait, "Extensa 5220", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 2b38c1b..61fcc2b 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -171,7 +171,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
++static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
+       {
+       .callback = init_old_suspend_ordering,
+       .ident = "Abit KN9 (nForce4 variant)",
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 358165e..5e37640 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -227,7 +227,7 @@ module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name,
+ module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
+ module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
+-static int param_set_trace_state(const char *val, struct kernel_param *kp)
++static int param_set_trace_state(const char *val, const struct kernel_param *kp)
+ {
+       acpi_status status;
+       const char *method = trace_method_name;
+@@ -263,7 +263,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+       return 0;
+ }
+-static int param_get_trace_state(char *buffer, struct kernel_param *kp)
++static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
+ {
+       if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
+               return sprintf(buffer, "disable");
+@@ -292,7 +292,7 @@ MODULE_PARM_DESC(aml_debug_output,
+                "To enable/disable the ACPI Debug Object output.");
+ /* /sys/module/acpi/parameters/acpica_version */
+-static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
++static int param_get_acpica_version(char *buffer, const struct kernel_param *kp)
+ {
+       int result;
+@@ -484,11 +484,11 @@ static u32 num_counters;
+ static struct attribute **all_attrs;
+ static u32 acpi_gpe_count;
+-static struct attribute_group interrupt_stats_attr_group = {
++static attribute_group_no_const interrupt_stats_attr_group = {
+       .name = "interrupts",
+ };
+-static struct kobj_attribute *counter_attrs;
++static kobj_attribute_no_const *counter_attrs;
+ static void delete_gpe_attr_array(void)
+ {
+@@ -774,13 +774,13 @@ static void __exit interrupt_stats_exit(void)
+ }
+ static ssize_t
+-acpi_show_profile(struct device *dev, struct device_attribute *attr,
++acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
+                 char *buf)
+ {
+       return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
+ }
+-static const struct device_attribute pm_profile_attr =
++static const struct kobj_attribute pm_profile_attr =
+       __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+ static ssize_t hotplug_enabled_show(struct kobject *kobj,
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index f4ebe39..f937534 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -1208,7 +1208,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
+       return 0;
+ }
+-static struct dmi_system_id thermal_dmi_table[] __initdata = {
++static const struct dmi_system_id thermal_dmi_table[] __initconst = {
+       /*
+        * Award BIOS on this AOpen makes thermal control almost worthless.
+        * http://bugzilla.kernel.org/show_bug.cgi?id=8842
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index a6b36fc53..dc320a6 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -41,7 +41,6 @@ ACPI_MODULE_NAME("video");
+ void acpi_video_unregister_backlight(void);
+ static bool backlight_notifier_registered;
+-static struct notifier_block backlight_nb;
+ static struct work_struct backlight_notify_work;
+ static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
+@@ -319,6 +318,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
+       return NOTIFY_OK;
+ }
++static struct notifier_block backlight_nb = {
++      .notifier_call = acpi_video_backlight_notify,
++};
++
+ /*
+  * Determine which type of backlight interface to use on this system,
+  * First check cmdline, then dmi quirks, then do autodetect.
+@@ -349,8 +352,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
+                                   &video_caps, NULL);
+               INIT_WORK(&backlight_notify_work,
+                         acpi_video_backlight_notify_work);
+-              backlight_nb.notifier_call = acpi_video_backlight_notify;
+-              backlight_nb.priority = 0;
+               if (backlight_register_notifier(&backlight_nb) == 0)
+                       backlight_notifier_registered = true;
+               init_done = true;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 16288e7..91ab5f3 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -120,7 +120,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+ static int binder_stop_on_user_error;
+ static int binder_set_stop_on_user_error(const char *val,
+-                                       struct kernel_param *kp)
++                                       const struct kernel_param *kp)
+ {
+       int ret;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 223a770..295a507 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -105,7 +105,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+ static void ata_dev_xfermask(struct ata_device *dev);
+ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+-atomic_t ata_print_id = ATOMIC_INIT(0);
++atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
+ struct ata_force_param {
+       const char      *name;
+@@ -4988,7 +4988,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+       struct ata_port *ap;
+       unsigned int tag;
+-      WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++      BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       ap = qc->ap;
+       qc->flags = 0;
+@@ -5005,7 +5005,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+       struct ata_port *ap;
+       struct ata_link *link;
+-      WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++      BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+       ap = qc->ap;
+       link = qc->dev->link;
+@@ -6117,6 +6117,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+               return;
+       spin_lock(&lock);
++      pax_open_kernel();
+       for (cur = ops->inherits; cur; cur = cur->inherits) {
+               void **inherit = (void **)cur;
+@@ -6130,8 +6131,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+               if (IS_ERR(*pp))
+                       *pp = NULL;
+-      ops->inherits = NULL;
++      const_cast(ops->inherits) = NULL;
++      pax_close_kernel();
+       spin_unlock(&lock);
+ }
+@@ -6327,7 +6329,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+       /* give ports names and add SCSI hosts */
+       for (i = 0; i < host->n_ports; i++) {
+-              host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
++              host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
+               host->ports[i]->local_port_no = i + 1;
+       }
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e207b33..145ebf0 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4689,7 +4689,7 @@ int ata_sas_port_init(struct ata_port *ap)
+       if (rc)
+               return rc;
+-      ap->print_id = atomic_inc_return(&ata_print_id);
++      ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(ata_sas_port_init);
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index 3b301a4..ff15676 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -53,7 +53,7 @@ enum {
+       ATA_DNXFER_QUIET        = (1 << 31),
+ };
+-extern atomic_t ata_print_id;
++extern atomic_unchecked_t ata_print_id;
+ extern int atapi_passthru16;
+ extern int libata_fua;
+ extern int libata_noacpi;
+diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
+index b4d5477..9ec8e0b 100644
+--- a/drivers/ata/pata_arasan_cf.c
++++ b/drivers/ata/pata_arasan_cf.c
+@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
+       /* Handle platform specific quirks */
+       if (quirk) {
+               if (quirk & CF_BROKEN_PIO) {
+-                      ap->ops->set_piomode = NULL;
++                      pax_open_kernel();
++                      const_cast(ap->ops->set_piomode) = NULL;
++                      pax_close_kernel();
+                       ap->pio_mask = 0;
+               }
+               if (quirk & CF_BROKEN_MWDMA)
+diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
+index f9b983a..887b9d8 100644
+--- a/drivers/atm/adummy.c
++++ b/drivers/atm/adummy.c
+@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
+               vcc->pop(vcc, skb);
+       else
+               dev_kfree_skb_any(skb);
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       return 0;
+ }
+diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
+index f1a9198..f466a4a 100644
+--- a/drivers/atm/ambassador.c
++++ b/drivers/atm/ambassador.c
+@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
+   PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+   
+   // VC layer stats
+-  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++  atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+   
+   // free the descriptor
+   kfree (tx_descr);
+@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
+         dump_skb ("<<<", vc, skb);
+         
+         // VC layer stats
+-        atomic_inc(&atm_vcc->stats->rx);
++        atomic_inc_unchecked(&atm_vcc->stats->rx);
+         __net_timestamp(skb);
+         // end of our responsibility
+         atm_vcc->push (atm_vcc, skb);
+@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
+       } else {
+               PRINTK (KERN_INFO, "dropped over-size frame");
+       // should we count this?
+-      atomic_inc(&atm_vcc->stats->rx_drop);
++      atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+       }
+       
+     } else {
+@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
+   }
+   
+   if (check_area (skb->data, skb->len)) {
+-    atomic_inc(&atm_vcc->stats->tx_err);
++    atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+     return -ENOMEM; // ?
+   }
+   
+diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
+index 480fa6f..947067c 100644
+--- a/drivers/atm/atmtcp.c
++++ b/drivers/atm/atmtcp.c
+@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+               if (vcc->pop) vcc->pop(vcc,skb);
+               else dev_kfree_skb(skb);
+               if (dev_data) return 0;
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -ENOLINK;
+       }
+       size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       if (!new_skb) {
+               if (vcc->pop) vcc->pop(vcc,skb);
+               else dev_kfree_skb(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -ENOBUFS;
+       }
+       hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       if (vcc->pop) vcc->pop(vcc,skb);
+       else dev_kfree_skb(skb);
+       out_vcc->push(out_vcc,new_skb);
+-      atomic_inc(&vcc->stats->tx);
+-      atomic_inc(&out_vcc->stats->rx);
++      atomic_inc_unchecked(&vcc->stats->tx);
++      atomic_inc_unchecked(&out_vcc->stats->rx);
+       return 0;
+ }
+@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       read_unlock(&vcc_sklist_lock);
+       if (!out_vcc) {
+               result = -EUNATCH;
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               goto done;
+       }
+       skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+       __net_timestamp(new_skb);
+       skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+       out_vcc->push(out_vcc,new_skb);
+-      atomic_inc(&vcc->stats->tx);
+-      atomic_inc(&out_vcc->stats->rx);
++      atomic_inc_unchecked(&vcc->stats->tx);
++      atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+       if (vcc->pop) vcc->pop(vcc,skb);
+       else dev_kfree_skb(skb);
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 6339efd..2b441d5 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+               DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+                   vcc->dev->number);
+               length = 0;
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+       }
+       else {
+               length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+                           size);
+               }
+               eff = length = 0;
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+       }
+       else {
+               size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+                           "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+                           vcc->dev->number,vcc->vci,length,size << 2,descr);
+                       length = eff = 0;
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+               }
+       }
+       skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -770,7 +770,7 @@ rx_dequeued++;
+                       vcc->push(vcc,skb);
+                       pushed++;
+               }
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+       }
+       wake_up(&eni_dev->rx_wait);
+ }
+@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
+                                DMA_TO_DEVICE);
+               if (vcc->pop) vcc->pop(vcc,skb);
+               else dev_kfree_skb_irq(skb);
+-              atomic_inc(&vcc->stats->tx);
++              atomic_inc_unchecked(&vcc->stats->tx);
+               wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+       }
+diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
+index 85aaf22..8730d15 100644
+--- a/drivers/atm/firestream.c
++++ b/drivers/atm/firestream.c
+@@ -753,7 +753,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
+                               }
+                       }
+-                      atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++                      atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+                       fs_dprintk (FS_DEBUG_TXMEM, "i");
+                       fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -820,7 +820,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
+ #endif
+                               skb_put (skb, qe->p1 & 0xffff); 
+                               ATM_SKB(skb)->vcc = atm_vcc;
+-                              atomic_inc(&atm_vcc->stats->rx);
++                              atomic_inc_unchecked(&atm_vcc->stats->rx);
+                               __net_timestamp(skb);
+                               fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+                               atm_vcc->push (atm_vcc, skb);
+@@ -841,12 +841,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
+                               kfree (pe);
+                       }
+                       if (atm_vcc)
+-                              atomic_inc(&atm_vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+                       break;
+               case 0x1f: /*  Reassembly abort: no buffers. */
+                       /* Silently increment error counter. */
+                       if (atm_vcc)
+-                              atomic_inc(&atm_vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+                       break;
+               default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+                       printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", 
+diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
+index 75dde90..4309ead 100644
+--- a/drivers/atm/fore200e.c
++++ b/drivers/atm/fore200e.c
+@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
+ #endif
+               /* check error condition */
+               if (*entry->status & STATUS_ERROR)
+-                  atomic_inc(&vcc->stats->tx_err);
++                  atomic_inc_unchecked(&vcc->stats->tx_err);
+               else
+-                  atomic_inc(&vcc->stats->tx);
++                  atomic_inc_unchecked(&vcc->stats->tx);
+           }
+       }
+@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
+     if (skb == NULL) {
+       DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return -ENOMEM;
+     } 
+@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
+       dev_kfree_skb_any(skb);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return -ENOMEM;
+     }
+     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+     vcc->push(vcc, skb);
+-    atomic_inc(&vcc->stats->rx);
++    atomic_inc_unchecked(&vcc->stats->rx);
+     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
+               DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+                       fore200e->atm_dev->number,
+                       entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+           }
+       }
+@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
+               goto retry_here;
+           }
+-          atomic_inc(&vcc->stats->tx_err);
++          atomic_inc_unchecked(&vcc->stats->tx_err);
+           fore200e->tx_sat++;
+           DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff --git a/drivers/atm/he.c b/drivers/atm/he.c
+index 0f5cb37..c8bcdef 100644
+--- a/drivers/atm/he.c
++++ b/drivers/atm/he.c
+@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+               if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+                       hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       goto return_host_buffers;
+               }
+@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+                               RBRQ_LEN_ERR(he_dev->rbrq_head)
+                                                       ? "LEN_ERR" : "",
+                                                       vcc->vpi, vcc->vci);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto return_host_buffers;
+               }
+@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+               vcc->push(vcc, skb);
+               spin_lock(&he_dev->global_lock);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+ return_host_buffers:
+               ++pdus_assembled;
+@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
+                                       tpd->vcc->pop(tpd->vcc, tpd->skb);
+                               else
+                                       dev_kfree_skb_any(tpd->skb);
+-                              atomic_inc(&tpd->vcc->stats->tx_err);
++                              atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+                       }
+                       dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+                       return;
+@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                       vcc->pop(vcc, skb);
+               else
+                       dev_kfree_skb_any(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -EINVAL;
+       }
+@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                       vcc->pop(vcc, skb);
+               else
+                       dev_kfree_skb_any(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -EINVAL;
+       }
+ #endif
+@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                       vcc->pop(vcc, skb);
+               else
+                       dev_kfree_skb_any(skb);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               spin_unlock_irqrestore(&he_dev->global_lock, flags);
+               return -ENOMEM;
+       }
+@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+                                       vcc->pop(vcc, skb);
+                               else
+                                       dev_kfree_skb_any(skb);
+-                              atomic_inc(&vcc->stats->tx_err);
++                              atomic_inc_unchecked(&vcc->stats->tx_err);
+                               spin_unlock_irqrestore(&he_dev->global_lock, flags);
+                               return -ENOMEM;
+                       }
+@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       __enqueue_tpd(he_dev, tpd, cid);
+       spin_unlock_irqrestore(&he_dev->global_lock, flags);
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       return 0;
+ }
+diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
+index 5fc81e2..42907ae 100644
+--- a/drivers/atm/horizon.c
++++ b/drivers/atm/horizon.c
+@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
+       {
+         struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+         // VC layer stats
+-        atomic_inc(&vcc->stats->rx);
++        atomic_inc_unchecked(&vcc->stats->rx);
+         __net_timestamp(skb);
+         // end of our responsibility
+         vcc->push (vcc, skb);
+@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
+       dev->tx_iovec = NULL;
+       
+       // VC layer stats
+-      atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++      atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+       
+       // free the skb
+       hrz_kfree_skb (skb);
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 074616b..d6b3d5f 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
+               else
+                       dev_kfree_skb(skb);
+-              atomic_inc(&vcc->stats->tx);
++              atomic_inc_unchecked(&vcc->stats->tx);
+       }
+       atomic_dec(&scq->used);
+@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                       if ((sb = dev_alloc_skb(64)) == NULL) {
+                               printk("%s: Can't allocate buffers for aal0.\n",
+                                      card->name);
+-                              atomic_add(i, &vcc->stats->rx_drop);
++                              atomic_add_unchecked(i, &vcc->stats->rx_drop);
+                               break;
+                       }
+                       if (!atm_charge(vcc, sb->truesize)) {
+                               RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+                                        card->name);
+-                              atomic_add(i - 1, &vcc->stats->rx_drop);
++                              atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+                               dev_kfree_skb(sb);
+                               break;
+                       }
+@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                       ATM_SKB(sb)->vcc = vcc;
+                       __net_timestamp(sb);
+                       vcc->push(vcc, sb);
+-                      atomic_inc(&vcc->stats->rx);
++                      atomic_inc_unchecked(&vcc->stats->rx);
+                       cell += ATM_CELL_PAYLOAD;
+               }
+@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                                "(CDC: %08x)\n",
+                                card->name, len, rpp->len, readl(SAR_REG_CDC));
+                       recycle_rx_pool_skb(card, rpp);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       return;
+               }
+               if (stat & SAR_RSQE_CRC) {
+                       RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+                       recycle_rx_pool_skb(card, rpp);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       return;
+               }
+               if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                               RXPRINTK("%s: Can't alloc RX skb.\n",
+                                        card->name);
+                               recycle_rx_pool_skb(card, rpp);
+-                              atomic_inc(&vcc->stats->rx_err);
++                              atomic_inc_unchecked(&vcc->stats->rx_err);
+                               return;
+                       }
+                       if (!atm_charge(vcc, skb->truesize)) {
+@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+                       __net_timestamp(skb);
+                       vcc->push(vcc, skb);
+-                      atomic_inc(&vcc->stats->rx);
++                      atomic_inc_unchecked(&vcc->stats->rx);
+                       return;
+               }
+@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+               __net_timestamp(skb);
+               vcc->push(vcc, skb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+               if (skb->truesize > SAR_FB_SIZE_3)
+                       add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
+               if (vcc->qos.aal != ATM_AAL0) {
+                       RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+                               card->name, vpi, vci);
+-                      atomic_inc(&vcc->stats->rx_drop);
++                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       goto drop;
+               }
+       
+               if ((sb = dev_alloc_skb(64)) == NULL) {
+                       printk("%s: Can't allocate buffers for AAL0.\n",
+                              card->name);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto drop;
+               }
+@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
+               ATM_SKB(sb)->vcc = vcc;
+               __net_timestamp(sb);
+               vcc->push(vcc, sb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+ drop:
+               skb_pull(queue, 64);
+@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+       if (vc == NULL) {
+               printk("%s: NULL connection in send().\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+       if (!test_bit(VCF_TX, &vc->flags)) {
+               printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+               break;
+       default:
+               printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+       if (skb_shinfo(skb)->nr_frags != 0) {
+               printk("%s: No scatter-gather yet.\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+       err = queue_skb(card, vc, skb, oam);
+       if (err) {
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb(skb);
+               return err;
+       }
+@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
+       skb = dev_alloc_skb(64);
+       if (!skb) {
+               printk("%s: Out of memory in send_oam().\n", card->name);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               return -ENOMEM;
+       }
+       atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 809dd1e..ee10755 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
+       status = (u_short) (buf_desc_ptr->desc_mode);  
+       if (status & (RX_CER | RX_PTE | RX_OFL))  
+       {  
+-                atomic_inc(&vcc->stats->rx_err);
++                atomic_inc_unchecked(&vcc->stats->rx_err);
+               IF_ERR(printk("IA: bad packet, dropping it");)  
+                 if (status & RX_CER) { 
+                     IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
+       len = dma_addr - buf_addr;  
+         if (len > iadev->rx_buf_sz) {
+            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+-           atomic_inc(&vcc->stats->rx_err);
++           atomic_inc_unchecked(&vcc->stats->rx_err);
+          goto out_free_desc;
+         }
+                 
+@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+           ia_vcc = INPH_IA_VCC(vcc);
+           if (ia_vcc == NULL)
+           {
+-             atomic_inc(&vcc->stats->rx_err);
++             atomic_inc_unchecked(&vcc->stats->rx_err);
+              atm_return(vcc, skb->truesize);
+              dev_kfree_skb_any(skb);
+              goto INCR_DLE;
+@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+           if ((length > iadev->rx_buf_sz) || (length > 
+                               (skb->len - sizeof(struct cpcs_trailer))))
+           {
+-             atomic_inc(&vcc->stats->rx_err);
++             atomic_inc_unchecked(&vcc->stats->rx_err);
+              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
+                                                             length, skb->len);)
+              atm_return(vcc, skb->truesize);
+@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+         IF_RX(printk("rx_dle_intr: skb push");)  
+         vcc->push(vcc,skb);  
+-        atomic_inc(&vcc->stats->rx);
++        atomic_inc_unchecked(&vcc->stats->rx);
+           iadev->rx_pkt_cnt++;
+       }  
+ INCR_DLE:
+@@ -2829,15 +2829,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+          {
+              struct k_sonet_stats *stats;
+              stats = &PRIV(_ia_dev[board])->sonet_stats;
+-             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
+-             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
+-             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
+-             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
+-             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
+-             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
+-             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
+-             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
+-             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
++             printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
++             printk("line_bip   : %d\n", atomic_read_unchecked(&stats->line_bip));
++             printk("path_bip   : %d\n", atomic_read_unchecked(&stats->path_bip));
++             printk("line_febe  : %d\n", atomic_read_unchecked(&stats->line_febe));
++             printk("path_febe  : %d\n", atomic_read_unchecked(&stats->path_febe));
++             printk("corr_hcs   : %d\n", atomic_read_unchecked(&stats->corr_hcs));
++             printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
++             printk("tx_cells   : %d\n", atomic_read_unchecked(&stats->tx_cells));
++             printk("rx_cells   : %d\n", atomic_read_unchecked(&stats->rx_cells));
+          }
+             ia_cmds.status = 0;
+             break;
+@@ -2942,7 +2942,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
+       if ((desc == 0) || (desc > iadev->num_tx_desc))  
+       {  
+               IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
+-                atomic_inc(&vcc->stats->tx);
++                atomic_inc_unchecked(&vcc->stats->tx);
+               if (vcc->pop)   
+                   vcc->pop(vcc, skb);   
+               else  
+@@ -3047,14 +3047,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
+         ATM_DESC(skb) = vcc->vci;
+         skb_queue_tail(&iadev->tx_dma_q, skb);
+-        atomic_inc(&vcc->stats->tx);
++        atomic_inc_unchecked(&vcc->stats->tx);
+         iadev->tx_pkt_cnt++;
+       /* Increment transaction counter */  
+       writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
+         
+ #if 0        
+         /* add flow control logic */ 
+-        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
++        if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
+           if (iavcc->vc_desc_cnt > 10) {
+              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
+             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
+diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
+index ce43ae3..969de38 100644
+--- a/drivers/atm/lanai.c
++++ b/drivers/atm/lanai.c
+@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
+       vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+       lanai_endtx(lanai, lvcc);
+       lanai_free_skb(lvcc->tx.atmvcc, skb);
+-      atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++      atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
+       ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+       __net_timestamp(skb);
+       lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+-      atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++      atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+     out:
+       lvcc->rx.buf.ptr = end;
+       cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+               DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+                   "vcc %d\n", lanai->number, (unsigned int) s, vci);
+               lanai->stats.service_rxnotaal5++;
+-              atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++              atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+               return 0;
+       }
+       if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+               int bytes;
+               read_unlock(&vcc_sklist_lock);
+               DPRINTK("got trashed rx pdu on vci %d\n", vci);
+-              atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++              atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+               lvcc->stats.x.aal5.service_trash++;
+               bytes = (SERVICE_GET_END(s) * 16) -
+                   (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+       }
+       if (s & SERVICE_STREAM) {
+               read_unlock(&vcc_sklist_lock);
+-              atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++              atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+               lvcc->stats.x.aal5.service_stream++;
+               printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+                   "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+               return 0;
+       }
+       DPRINTK("got rx crc error on vci %d\n", vci);
+-      atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++      atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+       lvcc->stats.x.aal5.service_rxcrc++;
+       lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+       cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
+index 700ed15..a3a8a73 100644
+--- a/drivers/atm/nicstar.c
++++ b/drivers/atm/nicstar.c
+@@ -1633,7 +1633,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+               printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+                      card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       if (!vc->tx) {
+               printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+                      card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+@@ -1649,14 +1649,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+               printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+                      card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+       if (skb_shinfo(skb)->nr_frags != 0) {
+               printk("nicstar%d: No scatter-gather yet.\n", card->index);
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+@@ -1704,11 +1704,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+       }
+       if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+-              atomic_inc(&vcc->stats->tx_err);
++              atomic_inc_unchecked(&vcc->stats->tx_err);
+               dev_kfree_skb_any(skb);
+               return -EIO;
+       }
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       return 0;
+ }
+@@ -2025,14 +2025,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               printk
+                                   ("nicstar%d: Can't allocate buffers for aal0.\n",
+                                    card->index);
+-                              atomic_add(i, &vcc->stats->rx_drop);
++                              atomic_add_unchecked(i, &vcc->stats->rx_drop);
+                               break;
+                       }
+                       if (!atm_charge(vcc, sb->truesize)) {
+                               RXPRINTK
+                                   ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+                                    card->index);
+-                              atomic_add(i - 1, &vcc->stats->rx_drop);        /* already increased by 1 */
++                              atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);      /* already increased by 1 */
+                               dev_kfree_skb_any(sb);
+                               break;
+                       }
+@@ -2047,7 +2047,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       ATM_SKB(sb)->vcc = vcc;
+                       __net_timestamp(sb);
+                       vcc->push(vcc, sb);
+-                      atomic_inc(&vcc->stats->rx);
++                      atomic_inc_unchecked(&vcc->stats->rx);
+                       cell += ATM_CELL_PAYLOAD;
+               }
+@@ -2064,7 +2064,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       if (iovb == NULL) {
+                               printk("nicstar%d: Out of iovec buffers.\n",
+                                      card->index);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                               recycle_rx_buf(card, skb);
+                               return;
+                       }
+@@ -2088,7 +2088,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                  small or large buffer itself. */
+       } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+               printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+               recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+                                     NS_MAX_IOVECS);
+               NS_PRV_IOVCNT(iovb) = 0;
+@@ -2108,7 +2108,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                           ("nicstar%d: Expected a small buffer, and this is not one.\n",
+                            card->index);
+                       which_list(card, skb);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       recycle_rx_buf(card, skb);
+                       vc->rx_iov = NULL;
+                       recycle_iov_buf(card, iovb);
+@@ -2121,7 +2121,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                           ("nicstar%d: Expected a large buffer, and this is not one.\n",
+                            card->index);
+                       which_list(card, skb);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+                                             NS_PRV_IOVCNT(iovb));
+                       vc->rx_iov = NULL;
+@@ -2144,7 +2144,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               printk(" - PDU size mismatch.\n");
+                       else
+                               printk(".\n");
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+                                             NS_PRV_IOVCNT(iovb));
+                       vc->rx_iov = NULL;
+@@ -2158,14 +2158,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       /* skb points to a small buffer */
+                       if (!atm_charge(vcc, skb->truesize)) {
+                               push_rxbufs(card, skb);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       } else {
+                               skb_put(skb, len);
+                               dequeue_sm_buf(card, skb);
+                               ATM_SKB(skb)->vcc = vcc;
+                               __net_timestamp(skb);
+                               vcc->push(vcc, skb);
+-                              atomic_inc(&vcc->stats->rx);
++                              atomic_inc_unchecked(&vcc->stats->rx);
+                       }
+               } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
+                       struct sk_buff *sb;
+@@ -2176,14 +2176,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                       if (len <= NS_SMBUFSIZE) {
+                               if (!atm_charge(vcc, sb->truesize)) {
+                                       push_rxbufs(card, sb);
+-                                      atomic_inc(&vcc->stats->rx_drop);
++                                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                               } else {
+                                       skb_put(sb, len);
+                                       dequeue_sm_buf(card, sb);
+                                       ATM_SKB(sb)->vcc = vcc;
+                                       __net_timestamp(sb);
+                                       vcc->push(vcc, sb);
+-                                      atomic_inc(&vcc->stats->rx);
++                                      atomic_inc_unchecked(&vcc->stats->rx);
+                               }
+                               push_rxbufs(card, skb);
+@@ -2192,7 +2192,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               if (!atm_charge(vcc, skb->truesize)) {
+                                       push_rxbufs(card, skb);
+-                                      atomic_inc(&vcc->stats->rx_drop);
++                                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                               } else {
+                                       dequeue_lg_buf(card, skb);
+                                       skb_push(skb, NS_SMBUFSIZE);
+@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       ATM_SKB(skb)->vcc = vcc;
+                                       __net_timestamp(skb);
+                                       vcc->push(vcc, skb);
+-                                      atomic_inc(&vcc->stats->rx);
++                                      atomic_inc_unchecked(&vcc->stats->rx);
+                               }
+                               push_rxbufs(card, sb);
+@@ -2223,7 +2223,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       printk
+                                           ("nicstar%d: Out of huge buffers.\n",
+                                            card->index);
+-                                      atomic_inc(&vcc->stats->rx_drop);
++                                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                                       recycle_iovec_rx_bufs(card,
+                                                             (struct iovec *)
+                                                             iovb->data,
+@@ -2274,7 +2274,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                                       card->hbpool.count++;
+                               } else
+                                       dev_kfree_skb_any(hb);
+-                              atomic_inc(&vcc->stats->rx_drop);
++                              atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       } else {
+                               /* Copy the small buffer to the huge buffer */
+                               sb = (struct sk_buff *)iov->iov_base;
+@@ -2308,7 +2308,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+                               ATM_SKB(hb)->vcc = vcc;
+                               __net_timestamp(hb);
+                               vcc->push(vcc, hb);
+-                              atomic_inc(&vcc->stats->rx);
++                              atomic_inc_unchecked(&vcc->stats->rx);
+                       }
+               }
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 6ac2b2b..6373ebdc 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -849,7 +849,7 @@ static void solos_bh(unsigned long card_arg)
+                               }
+                               atm_charge(vcc, skb->truesize);
+                               vcc->push(vcc, skb);
+-                              atomic_inc(&vcc->stats->rx);
++                              atomic_inc_unchecked(&vcc->stats->rx);
+                               break;
+                       case PKT_STATUS:
+@@ -1130,7 +1130,7 @@ static uint32_t fpga_tx(struct solos_card *card)
+                       vcc = SKB_CB(oldskb)->vcc;
+                       if (vcc) {
+-                              atomic_inc(&vcc->stats->tx);
++                              atomic_inc_unchecked(&vcc->stats->tx);
+                               solos_pop(vcc, oldskb);
+                       } else {
+                               dev_kfree_skb_irq(oldskb);
+diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
+index 0215934..ce9f5b1 100644
+--- a/drivers/atm/suni.c
++++ b/drivers/atm/suni.c
+@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
+ #define ADD_LIMITED(s,v) \
+-    atomic_add((v),&stats->s); \
+-    if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
++    atomic_add_unchecked((v),&stats->s); \
++    if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
+ static void suni_hz(unsigned long from_timer)
+diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
+index 5120a96..e2572bd 100644
+--- a/drivers/atm/uPD98402.c
++++ b/drivers/atm/uPD98402.c
+@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
+       struct sonet_stats tmp;
+       int error = 0;
+-      atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++      atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+       sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+       if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+       if (zero && !error) {
+@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+ #define ADD_LIMITED(s,v) \
+-    { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
+-    if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+-      atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
++    { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
++    if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+ static void stat_event(struct atm_dev *dev)
+@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
+               if (reason & uPD98402_INT_PFM) stat_event(dev);
+               if (reason & uPD98402_INT_PCO) {
+                       (void) GET(PCOCR); /* clear interrupt cause */
+-                      atomic_add(GET(HECCT),
++                      atomic_add_unchecked(GET(HECCT),
+                           &PRIV(dev)->sonet_stats.uncorr_hcs);
+               }
+               if ((reason & uPD98402_INT_RFO) && 
+@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
+       PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
+         uPD98402_INT_LOS),PIMR); /* enable them */
+       (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
+-      atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+-      atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
+-      atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
++      atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
+       return 0;
+ }
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index cecfb94..87009ec 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
+               }
+               if (!size) {
+                       dev_kfree_skb_irq(skb);
+-                      if (vcc) atomic_inc(&vcc->stats->rx_err);
++                      if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+                       continue;
+               }
+               if (!atm_charge(vcc,skb->truesize)) {
+@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
+               skb->len = size;
+               ATM_SKB(skb)->vcc = vcc;
+               vcc->push(vcc,skb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+       }
+       zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
+                       skb_queue_head(&zatm_vcc->backlog,skb);
+                       break;
+               }
+-      atomic_inc(&vcc->stats->tx);
++      atomic_inc_unchecked(&vcc->stats->tx);
+       wake_up(&zatm_vcc->tx_wait);
+ }
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 6470eb8..3a7d92b 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -1136,7 +1136,7 @@ int subsys_interface_register(struct subsys_interface *sif)
+               return -EINVAL;
+       mutex_lock(&subsys->p->mutex);
+-      list_add_tail(&sif->node, &subsys->p->interfaces);
++      pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
+       if (sif->add_dev) {
+               subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+               while ((dev = subsys_dev_iter_next(&iter)))
+@@ -1161,7 +1161,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
+       subsys = sif->subsys;
+       mutex_lock(&subsys->p->mutex);
+-      list_del_init(&sif->node);
++      pax_list_del_init((struct list_head *)&sif->node);
+       if (sif->remove_dev) {
+               subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+               while ((dev = subsys_dev_iter_next(&iter)))
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 8fc654f..36e28e9 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -476,7 +476,9 @@ static int remove_nodes(struct device *dev,
+ static int release_nodes(struct device *dev, struct list_head *first,
+                        struct list_head *end, unsigned long flags)
+-      __releases(&dev->devres_lock)
++      __releases(&dev->devres_lock);
++static int release_nodes(struct device *dev, struct list_head *first,
++                       struct list_head *end, unsigned long flags)
+ {
+       LIST_HEAD(todo);
+       int cnt;
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index 44a74cf..a5dd826 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
+       if (!thread)
+               return 0;
+-      err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
++      err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
+       if (err)
+               printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+       else
+@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
+       *err = sys_unshare(CLONE_NEWNS);
+       if (*err)
+               goto out;
+-      *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
++      *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
+       if (*err)
+               goto out;
+-      sys_chdir("/.."); /* will traverse into overmounted root */
+-      sys_chroot(".");
++      sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
++      sys_chroot((char __force_user *)".");
+       complete(&setup_done);
+       while (1) {
+               spin_lock(&req_lock);
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 5548f96..3cbdfc1 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -638,7 +638,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
+ struct node_attr {
+       struct device_attribute attr;
+       enum node_states state;
+-};
++} __do_const;
+ static ssize_t show_node_state(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
+index 279e539..4c9d7fb 100644
+--- a/drivers/base/platform-msi.c
++++ b/drivers/base/platform-msi.c
+@@ -24,6 +24,8 @@
+ #include <linux/msi.h>
+ #include <linux/slab.h>
++#include <asm/pgtable.h>
++
+ #define DEV_ID_SHIFT  21
+ #define MAX_DEV_MSIS  (1 << (32 - DEV_ID_SHIFT))
+@@ -81,10 +83,12 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+       BUG_ON(!ops);
++      pax_open_kernel();
+       if (ops->msi_init == NULL)
+-              ops->msi_init = platform_msi_init;
++              const_cast(ops->msi_init) = platform_msi_init;
+       if (ops->set_desc == NULL)
+-              ops->set_desc = platform_msi_set_desc;
++              const_cast(ops->set_desc) = platform_msi_set_desc;
++      pax_close_kernel();
+ }
+ static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+@@ -102,16 +106,18 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+       struct irq_chip *chip = info->chip;
+       BUG_ON(!chip);
++      pax_open_kernel();
+       if (!chip->irq_mask)
+-              chip->irq_mask = irq_chip_mask_parent;
++              const_cast(chip->irq_mask) = irq_chip_mask_parent;
+       if (!chip->irq_unmask)
+-              chip->irq_unmask = irq_chip_unmask_parent;
++              const_cast(chip->irq_unmask) = irq_chip_unmask_parent;
+       if (!chip->irq_eoi)
+-              chip->irq_eoi = irq_chip_eoi_parent;
++              const_cast(chip->irq_eoi) = irq_chip_eoi_parent;
+       if (!chip->irq_set_affinity)
+-              chip->irq_set_affinity = msi_domain_set_affinity;
++              const_cast(chip->irq_set_affinity) = msi_domain_set_affinity;
+       if (!chip->irq_write_msi_msg)
+-              chip->irq_write_msi_msg = platform_msi_write_msg;
++              const_cast(chip->irq_write_msi_msg) = platform_msi_write_msg;
++      pax_close_kernel();
+ }
+ static void platform_msi_free_descs(struct device *dev, int base, int nvec)
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index a1f2aff..58bf1bc 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1621,8 +1621,10 @@ int genpd_dev_pm_attach(struct device *dev)
+               goto out;
+       }
+-      dev->pm_domain->detach = genpd_dev_pm_detach;
+-      dev->pm_domain->sync = genpd_dev_pm_sync;
++      pax_open_kernel();
++      const_cast(dev->pm_domain->detach) = genpd_dev_pm_detach;
++      const_cast(dev->pm_domain->sync) = genpd_dev_pm_sync;
++      pax_close_kernel();
+       mutex_lock(&pd->lock);
+       ret = genpd_poweron(pd, 0);
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 82a081e..b13ec3b 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -16,35 +16,32 @@
+ typedef int (*pm_callback_t)(struct device *);
+-static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
+-{
+-      pm_callback_t cb;
+-      const struct dev_pm_ops *ops;
+-
+-      if (dev->pm_domain)
+-              ops = &dev->pm_domain->ops;
+-      else if (dev->type && dev->type->pm)
+-              ops = dev->type->pm;
+-      else if (dev->class && dev->class->pm)
+-              ops = dev->class->pm;
+-      else if (dev->bus && dev->bus->pm)
+-              ops = dev->bus->pm;
+-      else
+-              ops = NULL;
+-
+-      if (ops)
+-              cb = *(pm_callback_t *)((void *)ops + cb_offset);
+-      else
+-              cb = NULL;
+-
+-      if (!cb && dev->driver && dev->driver->pm)
+-              cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+-
+-      return cb;
+-}
+-
+-#define RPM_GET_CALLBACK(dev, callback) \
+-              __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
++#define RPM_GET_CALLBACK(dev, callback)                       \
++({                                                    \
++      pm_callback_t cb;                               \
++      const struct dev_pm_ops *ops;                   \
++                                                      \
++      if (dev->pm_domain)                             \
++              ops = &dev->pm_domain->ops;             \
++      else if (dev->type && dev->type->pm)            \
++              ops = dev->type->pm;                    \
++      else if (dev->class && dev->class->pm)          \
++              ops = dev->class->pm;                   \
++      else if (dev->bus && dev->bus->pm)              \
++              ops = dev->bus->pm;                     \
++      else                                            \
++              ops = NULL;                             \
++                                                      \
++      if (ops)                                        \
++              cb = ops->callback;                     \
++      else                                            \
++              cb = NULL;                              \
++                                                      \
++      if (!cb && dev->driver && dev->driver->pm)      \
++              cb = dev->driver->pm->callback;         \
++                                                      \
++      cb;                                             \
++})
+ static int rpm_resume(struct device *dev, int rpmflags);
+ static int rpm_suspend(struct device *dev, int rpmflags);
+@@ -263,8 +260,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
+  * @cb: Runtime PM callback to run.
+  * @dev: Device to run the callback for.
+  */
++static int __rpm_callback(int (*cb)(struct device *), struct device *dev) __must_hold(&dev->power.lock);
+ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+-      __releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+       int retval;
+@@ -412,8 +409,8 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+  *
+  * This function must be called under dev->power.lock with interrupts disabled.
+  */
++static int rpm_suspend(struct device *dev, int rpmflags) __must_hold(&dev->power.lock);
+ static int rpm_suspend(struct device *dev, int rpmflags)
+-      __releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+       int (*callback)(struct device *);
+       struct device *parent = NULL;
+@@ -594,8 +591,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+  *
+  * This function must be called under dev->power.lock with interrupts disabled.
+  */
++static int rpm_resume(struct device *dev, int rpmflags) __must_hold(&dev->power.lock);
+ static int rpm_resume(struct device *dev, int rpmflags)
+-      __releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+       int (*callback)(struct device *);
+       struct device *parent = NULL;
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index a7b4679..d302490 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
+                       return -EIO;
+               }
+       }
+-      return sprintf(buf, p);
++      return sprintf(buf, "%s", p);
+ }
+ static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 62e4de2..38961cd 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -36,14 +36,14 @@ static bool pm_abort_suspend __read_mostly;
+  * They need to be modified together atomically, so it's better to use one
+  * atomic variable to hold them both.
+  */
+-static atomic_t combined_event_count = ATOMIC_INIT(0);
++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
+ #define IN_PROGRESS_BITS      (sizeof(int) * 4)
+ #define MAX_IN_PROGRESS               ((1 << IN_PROGRESS_BITS) - 1)
+ static void split_counters(unsigned int *cnt, unsigned int *inpr)
+ {
+-      unsigned int comb = atomic_read(&combined_event_count);
++      unsigned int comb = atomic_read_unchecked(&combined_event_count);
+       *cnt = (comb >> IN_PROGRESS_BITS);
+       *inpr = comb & MAX_IN_PROGRESS;
+@@ -538,7 +538,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
+               ws->start_prevent_time = ws->last_time;
+       /* Increment the counter of events in progress. */
+-      cec = atomic_inc_return(&combined_event_count);
++      cec = atomic_inc_return_unchecked(&combined_event_count);
+       trace_wakeup_source_activate(ws->name, cec);
+ }
+@@ -664,7 +664,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
+        * Increment the counter of registered wakeup events and decrement the
+        * couter of wakeup events in progress simultaneously.
+        */
+-      cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
++      cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
+       trace_wakeup_source_deactivate(ws->name, cec);
+       split_counters(&cnt, &inpr);
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 1ee3d40..a41544a 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -400,7 +400,7 @@ static const struct file_operations regmap_reg_ranges_fops = {
+ static int regmap_access_show(struct seq_file *s, void *ignored)
+ {
+       struct regmap *map = s->private;
+-      int i, reg_len;
++      unsigned int i, reg_len;
+       reg_len = regmap_calc_reg_len(map->max_register);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index e964d06..633487f 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -402,8 +402,8 @@ static void regmap_unlock_mutex(void *__map)
+       mutex_unlock(&map->mutex);
+ }
++static void regmap_lock_spinlock(void *__map) __acquires(&map->spinlock);
+ static void regmap_lock_spinlock(void *__map)
+-__acquires(&map->spinlock)
+ {
+       struct regmap *map = __map;
+       unsigned long flags;
+@@ -412,8 +412,8 @@ __acquires(&map->spinlock)
+       map->spinlock_flags = flags;
+ }
++static void regmap_unlock_spinlock(void *__map) __releases(&map->spinlock);
+ static void regmap_unlock_spinlock(void *__map)
+-__releases(&map->spinlock)
+ {
+       struct regmap *map = __map;
+       spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
+diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
+index 8d98a32..61d3165 100644
+--- a/drivers/base/syscore.c
++++ b/drivers/base/syscore.c
+@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
+ void register_syscore_ops(struct syscore_ops *ops)
+ {
+       mutex_lock(&syscore_ops_lock);
+-      list_add_tail(&ops->node, &syscore_ops_list);
++      pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
+       mutex_unlock(&syscore_ops_lock);
+ }
+ EXPORT_SYMBOL_GPL(register_syscore_ops);
+@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
+ void unregister_syscore_ops(struct syscore_ops *ops)
+ {
+       mutex_lock(&syscore_ops_lock);
+-      list_del(&ops->node);
++      pax_list_del((struct list_head *)&ops->node);
+       mutex_unlock(&syscore_ops_lock);
+ }
+ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index db9d6bb..9c5dc78 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -3017,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
+       while (!list_empty(&h->reqQ)) {
+               c = list_entry(h->reqQ.next, CommandList_struct, list);
+               /* can't do anything if fifo is full */
+-              if ((h->access.fifo_full(h))) {
++              if ((h->access->fifo_full(h))) {
+                       dev_warn(&h->pdev->dev, "fifo full\n");
+                       break;
+               }
+@@ -3027,7 +3027,7 @@ static void start_io(ctlr_info_t *h)
+               h->Qdepth--;
+               /* Tell the controller execute command */
+-              h->access.submit_command(h, c);
++              h->access->submit_command(h, c);
+               /* Put job onto the completed Q */
+               addQ(&h->cmpQ, c);
+@@ -3453,17 +3453,17 @@ startio:
+ static inline unsigned long get_next_completion(ctlr_info_t *h)
+ {
+-      return h->access.command_completed(h);
++      return h->access->command_completed(h);
+ }
+ static inline int interrupt_pending(ctlr_info_t *h)
+ {
+-      return h->access.intr_pending(h);
++      return h->access->intr_pending(h);
+ }
+ static inline long interrupt_not_for_us(ctlr_info_t *h)
+ {
+-      return ((h->access.intr_pending(h) == 0) ||
++      return ((h->access->intr_pending(h) == 0) ||
+               (h->interrupts_enabled == 0));
+ }
+@@ -3496,7 +3496,7 @@ static inline u32 next_command(ctlr_info_t *h)
+       u32 a;
+       if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+-              return h->access.command_completed(h);
++              return h->access->command_completed(h);
+       if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+               a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+@@ -4053,7 +4053,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+               trans_support & CFGTBL_Trans_use_short_tags);
+       /* Change the access methods to the performant access methods */
+-      h->access = SA5_performant_access;
++      h->access = &SA5_performant_access;
+       h->transMethod = CFGTBL_Trans_Performant;
+       return;
+@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
+       if (prod_index < 0)
+               return -ENODEV;
+       h->product_name = products[prod_index].product_name;
+-      h->access = *(products[prod_index].access);
++      h->access = products[prod_index].access;
+       if (cciss_board_disabled(h)) {
+               dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+@@ -5058,7 +5058,7 @@ reinit_after_soft_reset:
+       }
+       /* make sure the board interrupts are off */
+-      h->access.set_intr_mask(h, CCISS_INTR_OFF);
++      h->access->set_intr_mask(h, CCISS_INTR_OFF);
+       rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
+       if (rc)
+               goto clean2;
+@@ -5108,7 +5108,7 @@ reinit_after_soft_reset:
+                * fake ones to scoop up any residual completions.
+                */
+               spin_lock_irqsave(&h->lock, flags);
+-              h->access.set_intr_mask(h, CCISS_INTR_OFF);
++              h->access->set_intr_mask(h, CCISS_INTR_OFF);
+               spin_unlock_irqrestore(&h->lock, flags);
+               free_irq(h->intr[h->intr_mode], h);
+               rc = cciss_request_irq(h, cciss_msix_discard_completions,
+@@ -5128,9 +5128,9 @@ reinit_after_soft_reset:
+               dev_info(&h->pdev->dev, "Board READY.\n");
+               dev_info(&h->pdev->dev,
+                       "Waiting for stale completions to drain.\n");
+-              h->access.set_intr_mask(h, CCISS_INTR_ON);
++              h->access->set_intr_mask(h, CCISS_INTR_ON);
+               msleep(10000);
+-              h->access.set_intr_mask(h, CCISS_INTR_OFF);
++              h->access->set_intr_mask(h, CCISS_INTR_OFF);
+               rc = controller_reset_failed(h->cfgtable);
+               if (rc)
+@@ -5153,7 +5153,7 @@ reinit_after_soft_reset:
+       cciss_scsi_setup(h);
+       /* Turn the interrupts on so we can service requests */
+-      h->access.set_intr_mask(h, CCISS_INTR_ON);
++      h->access->set_intr_mask(h, CCISS_INTR_ON);
+       /* Get the firmware version */
+       inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+@@ -5225,7 +5225,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
+       kfree(flush_buf);
+       if (return_code != IO_OK)
+               dev_warn(&h->pdev->dev, "Error flushing cache\n");
+-      h->access.set_intr_mask(h, CCISS_INTR_OFF);
++      h->access->set_intr_mask(h, CCISS_INTR_OFF);
+       free_irq(h->intr[h->intr_mode], h);
+ }
+diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
+index 7fda30e..2f27946 100644
+--- a/drivers/block/cciss.h
++++ b/drivers/block/cciss.h
+@@ -101,7 +101,7 @@ struct ctlr_info
+       /* information about each logical volume */
+       drive_info_struct *drv[CISS_MAX_LUN];
+-      struct access_method access;
++      struct access_method *access;
+       /* queue and queue Info */ 
+       struct list_head reqQ;
+@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
+ }
+ static struct access_method SA5_access = {
+-      SA5_submit_command,
+-      SA5_intr_mask,
+-      SA5_fifo_full,
+-      SA5_intr_pending,
+-      SA5_completed,
++      .submit_command = SA5_submit_command,
++      .set_intr_mask = SA5_intr_mask,
++      .fifo_full = SA5_fifo_full,
++      .intr_pending = SA5_intr_pending,
++      .command_completed = SA5_completed,
+ };
+ static struct access_method SA5B_access = {
+-        SA5_submit_command,
+-        SA5B_intr_mask,
+-        SA5_fifo_full,
+-        SA5B_intr_pending,
+-        SA5_completed,
++      .submit_command = SA5_submit_command,
++      .set_intr_mask = SA5B_intr_mask,
++      .fifo_full = SA5_fifo_full,
++      .intr_pending = SA5B_intr_pending,
++      .command_completed = SA5_completed,
+ };
+ static struct access_method SA5_performant_access = {
+-      SA5_submit_command,
+-      SA5_performant_intr_mask,
+-      SA5_fifo_full,
+-      SA5_performant_intr_pending,
+-      SA5_performant_completed,
++      .submit_command = SA5_submit_command,
++      .set_intr_mask = SA5_performant_intr_mask,
++      .fifo_full = SA5_fifo_full,
++      .intr_pending = SA5_performant_intr_pending,
++      .command_completed = SA5_performant_completed,
+ };
+ struct board_type {
+diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
+index ab62b81..8f38450 100644
+--- a/drivers/block/drbd/drbd_bitmap.c
++++ b/drivers/block/drbd/drbd_bitmap.c
+@@ -1034,7 +1034,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
+               submit_bio(bio);
+               /* this should not count as user activity and cause the
+                * resync to throttle -- see drbd_rs_should_slow_down(). */
+-              atomic_add(len >> 9, &device->rs_sect_ev);
++              atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
+       }
+ }
+diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
+index 4cb8f21..fc2c3e2 100644
+--- a/drivers/block/drbd/drbd_int.h
++++ b/drivers/block/drbd/drbd_int.h
+@@ -383,7 +383,7 @@ struct drbd_epoch {
+       struct drbd_connection *connection;
+       struct list_head list;
+       unsigned int barrier_nr;
+-      atomic_t epoch_size; /* increased on every request added. */
++      atomic_unchecked_t epoch_size; /* increased on every request added. */
+       atomic_t active;     /* increased on every req. added, and dec on every finished. */
+       unsigned long flags;
+ };
+@@ -595,8 +595,8 @@ struct drbd_md {
+       u32 flags;
+       u32 md_size_sect;
+-      s32 al_offset;  /* signed relative sector offset to activity log */
+-      s32 bm_offset;  /* signed relative sector offset to bitmap */
++      s32 al_offset __intentional_overflow(0);        /* signed relative sector offset to activity log */
++      s32 bm_offset __intentional_overflow(0);        /* signed relative sector offset to bitmap */
+       /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
+       s32 meta_dev_idx;
+@@ -960,7 +960,7 @@ struct drbd_device {
+       unsigned int al_tr_number;
+       int al_tr_cycle;
+       wait_queue_head_t seq_wait;
+-      atomic_t packet_seq;
++      atomic_unchecked_t packet_seq;
+       unsigned int peer_seq;
+       spinlock_t peer_seq_lock;
+       unsigned long comm_bm_set; /* communicated number of set bits. */
+@@ -969,8 +969,8 @@ struct drbd_device {
+       struct mutex own_state_mutex;
+       struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
+       char congestion_reason;  /* Why we where congested... */
+-      atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
+-      atomic_t rs_sect_ev; /* for submitted resync data rate, both */
++      atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
++      atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
+       int rs_last_sect_ev; /* counter to compare with */
+       int rs_last_events;  /* counter of read or write "events" (unit sectors)
+                             * on the lower level device when we last looked. */
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 100be55..eead333 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1363,7 +1363,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
+       p->sector = sector;
+       p->block_id = block_id;
+       p->blksize = blksize;
+-      p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
++      p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
+       return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
+ }
+@@ -1695,7 +1695,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
+               return -EIO;
+       p->sector = cpu_to_be64(req->i.sector);
+       p->block_id = (unsigned long)req;
+-      p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
++      p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
+       dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
+       if (device->state.conn >= C_SYNC_SOURCE &&
+           device->state.conn <= C_PAUSED_SYNC_T)
+@@ -1984,8 +1984,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
+       atomic_set(&device->unacked_cnt, 0);
+       atomic_set(&device->local_cnt, 0);
+       atomic_set(&device->pp_in_use_by_net, 0);
+-      atomic_set(&device->rs_sect_in, 0);
+-      atomic_set(&device->rs_sect_ev, 0);
++      atomic_set_unchecked(&device->rs_sect_in, 0);
++      atomic_set_unchecked(&device->rs_sect_ev, 0);
+       atomic_set(&device->ap_in_flight, 0);
+       atomic_set(&device->md_io.in_use, 0);
+@@ -2752,8 +2752,8 @@ void drbd_destroy_connection(struct kref *kref)
+       struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
+       struct drbd_resource *resource = connection->resource;
+-      if (atomic_read(&connection->current_epoch->epoch_size) !=  0)
+-              drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
++      if (atomic_read_unchecked(&connection->current_epoch->epoch_size) !=  0)
++              drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
+       kfree(connection->current_epoch);
+       idr_destroy(&connection->peer_devices);
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index f35db29..ac6c472 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -89,8 +89,8 @@ int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+ #include "drbd_nla.h"
+ #include <linux/genl_magic_func.h>
+-static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+-static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
++static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
++static atomic_unchecked_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
+ DEFINE_MUTEX(notification_mutex);
+@@ -4549,7 +4549,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
+       unsigned seq;
+       int err = -ENOMEM;
+-      seq = atomic_inc_return(&drbd_genl_seq);
++      seq = atomic_inc_return_unchecked(&drbd_genl_seq);
+       msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+       if (!msg)
+               goto failed;
+@@ -4601,7 +4601,7 @@ void notify_resource_state(struct sk_buff *skb,
+       int err;
+       if (!skb) {
+-              seq = atomic_inc_return(&notify_genl_seq);
++              seq = atomic_inc_return_unchecked(&notify_genl_seq);
+               skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+               err = -ENOMEM;
+               if (!skb)
+@@ -4652,7 +4652,7 @@ void notify_device_state(struct sk_buff *skb,
+       int err;
+       if (!skb) {
+-              seq = atomic_inc_return(&notify_genl_seq);
++              seq = atomic_inc_return_unchecked(&notify_genl_seq);
+               skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+               err = -ENOMEM;
+               if (!skb)
+@@ -4701,7 +4701,7 @@ void notify_connection_state(struct sk_buff *skb,
+       int err;
+       if (!skb) {
+-              seq = atomic_inc_return(&notify_genl_seq);
++              seq = atomic_inc_return_unchecked(&notify_genl_seq);
+               skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+               err = -ENOMEM;
+               if (!skb)
+@@ -4751,7 +4751,7 @@ void notify_peer_device_state(struct sk_buff *skb,
+       int err;
+       if (!skb) {
+-              seq = atomic_inc_return(&notify_genl_seq);
++              seq = atomic_inc_return_unchecked(&notify_genl_seq);
+               skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+               err = -ENOMEM;
+               if (!skb)
+@@ -4794,7 +4794,7 @@ void notify_helper(enum drbd_notification_type type,
+ {
+       struct drbd_resource *resource = device ? device->resource : connection->resource;
+       struct drbd_helper_info helper_info;
+-      unsigned int seq = atomic_inc_return(&notify_genl_seq);
++      unsigned int seq = atomic_inc_return_unchecked(&notify_genl_seq);
+       struct sk_buff *skb = NULL;
+       struct drbd_genlmsghdr *dh;
+       int err;
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 942384f..2a20af4 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -898,7 +898,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
+       struct drbd_device *device = peer_device->device;
+       int err;
+-      atomic_set(&device->packet_seq, 0);
++      atomic_set_unchecked(&device->packet_seq, 0);
+       device->peer_seq = 0;
+       device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
+@@ -1333,7 +1333,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
+       do {
+               next_epoch = NULL;
+-              epoch_size = atomic_read(&epoch->epoch_size);
++              epoch_size = atomic_read_unchecked(&epoch->epoch_size);
+               switch (ev & ~EV_CLEANUP) {
+               case EV_PUT:
+@@ -1373,7 +1373,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
+                                       rv = FE_DESTROYED;
+                       } else {
+                               epoch->flags = 0;
+-                              atomic_set(&epoch->epoch_size, 0);
++                              atomic_set_unchecked(&epoch->epoch_size, 0);
+                               /* atomic_set(&epoch->active, 0); is already zero */
+                               if (rv == FE_STILL_LIVE)
+                                       rv = FE_RECYCLED;
+@@ -1759,7 +1759,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
+               conn_wait_active_ee_empty(connection);
+               drbd_flush(connection);
+-              if (atomic_read(&connection->current_epoch->epoch_size)) {
++              if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
+                       epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+                       if (epoch)
+                               break;
+@@ -1773,11 +1773,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
+       }
+       epoch->flags = 0;
+-      atomic_set(&epoch->epoch_size, 0);
++      atomic_set_unchecked(&epoch->epoch_size, 0);
+       atomic_set(&epoch->active, 0);
+       spin_lock(&connection->epoch_lock);
+-      if (atomic_read(&connection->current_epoch->epoch_size)) {
++      if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
+               list_add(&epoch->list, &connection->current_epoch->list);
+               connection->current_epoch = epoch;
+               connection->epochs++;
+@@ -2030,7 +2030,9 @@ static int e_end_resync_block(struct drbd_work *w, int unused)
+ }
+ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
+-                          struct packet_info *pi) __releases(local)
++                          struct packet_info *pi) __releases(local);
++static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
++                          struct packet_info *pi)
+ {
+       struct drbd_device *device = peer_device->device;
+       struct drbd_peer_request *peer_req;
+@@ -2052,7 +2054,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
+       list_add_tail(&peer_req->w.list, &device->sync_ee);
+       spin_unlock_irq(&device->resource->req_lock);
+-      atomic_add(pi->size >> 9, &device->rs_sect_ev);
++      atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+                                    DRBD_FAULT_RS_WR) == 0)
+               return 0;
+@@ -2151,7 +2153,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
+               drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
+       }
+-      atomic_add(pi->size >> 9, &device->rs_sect_in);
++      atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
+       return err;
+ }
+@@ -2548,7 +2550,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
+               err = wait_for_and_update_peer_seq(peer_device, peer_seq);
+               drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
+-              atomic_inc(&connection->current_epoch->epoch_size);
++              atomic_inc_unchecked(&connection->current_epoch->epoch_size);
+               err2 = drbd_drain_block(peer_device, pi->size);
+               if (!err)
+                       err = err2;
+@@ -2589,7 +2591,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
+       spin_lock(&connection->epoch_lock);
+       peer_req->epoch = connection->current_epoch;
+-      atomic_inc(&peer_req->epoch->epoch_size);
++      atomic_inc_unchecked(&peer_req->epoch->epoch_size);
+       atomic_inc(&peer_req->epoch->active);
+       spin_unlock(&connection->epoch_lock);
+@@ -2735,7 +2737,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
+       curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+                     (int)part_stat_read(&disk->part0, sectors[1]) -
+-                      atomic_read(&device->rs_sect_ev);
++                      atomic_read_unchecked(&device->rs_sect_ev);
+       if (atomic_read(&device->ap_actlog_cnt)
+           || curr_events - device->rs_last_events > 64) {
+@@ -2881,7 +2883,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
+                       device->use_csums = true;
+               } else if (pi->cmd == P_OV_REPLY) {
+                       /* track progress, we may need to throttle */
+-                      atomic_add(size >> 9, &device->rs_sect_in);
++                      atomic_add_unchecked(size >> 9, &device->rs_sect_in);
+                       peer_req->w.cb = w_e_end_ov_reply;
+                       dec_rs_pending(device);
+                       /* drbd_rs_begin_io done when we sent this request,
+@@ -2954,7 +2956,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
+               goto out_free_e;
+ submit_for_resync:
+-      atomic_add(size >> 9, &device->rs_sect_ev);
++      atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
+ submit:
+       update_receiver_timing_details(connection, drbd_submit_peer_request);
+@@ -4907,7 +4909,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
+               list_add_tail(&peer_req->w.list, &device->sync_ee);
+               spin_unlock_irq(&device->resource->req_lock);
+-              atomic_add(pi->size >> 9, &device->rs_sect_ev);
++              atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
+               err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
+               if (err) {
+@@ -4931,7 +4933,7 @@ static int receive_rs_deallocated(struct drbd_connection *connection, struct pac
+               drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
+       }
+-      atomic_add(size >> 9, &device->rs_sect_in);
++      atomic_add_unchecked(size >> 9, &device->rs_sect_in);
+       return err;
+ }
+@@ -4940,7 +4942,7 @@ struct data_cmd {
+       int expect_payload;
+       unsigned int pkt_size;
+       int (*fn)(struct drbd_connection *, struct packet_info *);
+-};
++} __do_const;
+ static struct data_cmd drbd_cmd_handler[] = {
+       [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
+@@ -5068,7 +5070,7 @@ static void conn_disconnect(struct drbd_connection *connection)
+       if (!list_empty(&connection->current_epoch->list))
+               drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
+       /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+-      atomic_set(&connection->current_epoch->epoch_size, 0);
++      atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
+       connection->send.seen_any_write_yet = false;
+       drbd_info(connection, "Connection closed\n");
+@@ -5574,7 +5576,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
+               put_ldev(device);
+       }
+       dec_rs_pending(device);
+-      atomic_add(blksize >> 9, &device->rs_sect_in);
++      atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
+       return 0;
+ }
+@@ -5825,7 +5827,7 @@ static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
+ struct meta_sock_cmd {
+       size_t pkt_size;
+       int (*fn)(struct drbd_connection *connection, struct packet_info *);
+-};
++} __do_const;
+ static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
+ {
+diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
+index eea0c4a..4eba9a8 100644
+--- a/drivers/block/drbd/drbd_state.c
++++ b/drivers/block/drbd/drbd_state.c
+@@ -1507,9 +1507,10 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ void notify_resource_state_change(struct sk_buff *skb,
+                                 unsigned int seq,
+-                                struct drbd_resource_state_change *resource_state_change,
++                                void *_resource_state_change,
+                                 enum drbd_notification_type type)
+ {
++      struct drbd_resource_state_change *resource_state_change = _resource_state_change;
+       struct drbd_resource *resource = resource_state_change->resource;
+       struct resource_info resource_info = {
+               .res_role = resource_state_change->role[NEW],
+@@ -1523,9 +1524,10 @@ void notify_resource_state_change(struct sk_buff *skb,
+ void notify_connection_state_change(struct sk_buff *skb,
+                                   unsigned int seq,
+-                                  struct drbd_connection_state_change *connection_state_change,
++                                  void *_connection_state_change,
+                                   enum drbd_notification_type type)
+ {
++      struct drbd_connection_state_change *connection_state_change = _connection_state_change;
+       struct drbd_connection *connection = connection_state_change->connection;
+       struct connection_info connection_info = {
+               .conn_connection_state = connection_state_change->cstate[NEW],
+@@ -1537,9 +1539,10 @@ void notify_connection_state_change(struct sk_buff *skb,
+ void notify_device_state_change(struct sk_buff *skb,
+                               unsigned int seq,
+-                              struct drbd_device_state_change *device_state_change,
++                              void *_device_state_change,
+                               enum drbd_notification_type type)
+ {
++      struct drbd_device_state_change *device_state_change = _device_state_change;
+       struct drbd_device *device = device_state_change->device;
+       struct device_info device_info = {
+               .dev_disk_state = device_state_change->disk_state[NEW],
+@@ -1550,9 +1553,10 @@ void notify_device_state_change(struct sk_buff *skb,
+ void notify_peer_device_state_change(struct sk_buff *skb,
+                                    unsigned int seq,
+-                                   struct drbd_peer_device_state_change *p,
++                                   void *_p,
+                                    enum drbd_notification_type type)
+ {
++      struct drbd_peer_device_state_change *p = _p;
+       struct drbd_peer_device *peer_device = p->peer_device;
+       struct peer_device_info peer_device_info = {
+               .peer_repl_state = p->repl_state[NEW],
+diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
+index 6c9d5d4..110f64d 100644
+--- a/drivers/block/drbd/drbd_state.h
++++ b/drivers/block/drbd/drbd_state.h
+@@ -126,7 +126,7 @@ extern enum drbd_state_rv _drbd_set_state(struct drbd_device *, union drbd_state
+                                         enum chg_state_flags,
+                                         struct completion *done);
+ extern void print_st_err(struct drbd_device *, union drbd_state,
+-                      union drbd_state, int);
++                      union drbd_state, enum drbd_state_rv);
+ enum drbd_state_rv
+ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
+diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
+index 9e503a1..ac60262 100644
+--- a/drivers/block/drbd/drbd_state_change.h
++++ b/drivers/block/drbd/drbd_state_change.h
+@@ -45,19 +45,19 @@ extern void forget_state_change(struct drbd_state_change *);
+ extern void notify_resource_state_change(struct sk_buff *,
+                                        unsigned int,
+-                                       struct drbd_resource_state_change *,
++                                       void *,
+                                        enum drbd_notification_type type);
+ extern void notify_connection_state_change(struct sk_buff *,
+                                          unsigned int,
+-                                         struct drbd_connection_state_change *,
++                                         void *,
+                                          enum drbd_notification_type type);
+ extern void notify_device_state_change(struct sk_buff *,
+                                      unsigned int,
+-                                     struct drbd_device_state_change *,
++                                     void *,
+                                      enum drbd_notification_type type);
+ extern void notify_peer_device_state_change(struct sk_buff *,
+                                           unsigned int,
+-                                          struct drbd_peer_device_state_change *,
++                                          void *,
+                                           enum drbd_notification_type type);
+ #endif  /* DRBD_STATE_CHANGE_H */
+diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
+index c6755c9..2586293 100644
+--- a/drivers/block/drbd/drbd_worker.c
++++ b/drivers/block/drbd/drbd_worker.c
+@@ -87,7 +87,8 @@ void drbd_md_endio(struct bio *bio)
+ /* reads on behalf of the partner,
+  * "submitted" by the receiver
+  */
+-static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
++static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local);
++static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req)
+ {
+       unsigned long flags = 0;
+       struct drbd_peer_device *peer_device = peer_req->peer_device;
+@@ -108,7 +109,8 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
+ /* writes on behalf of the partner, or resync writes,
+  * "submitted" by the receiver, final stage.  */
+-void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
++void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local);
++void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req)
+ {
+       unsigned long flags = 0;
+       struct drbd_peer_device *peer_device = peer_req->peer_device;
+@@ -408,7 +410,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
+       list_add_tail(&peer_req->w.list, &device->read_ee);
+       spin_unlock_irq(&device->resource->req_lock);
+-      atomic_add(size >> 9, &device->rs_sect_ev);
++      atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+                                    DRBD_FAULT_RS_RD) == 0)
+               return 0;
+@@ -554,7 +556,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
+       unsigned int sect_in;  /* Number of sectors that came in since the last turn */
+       int number, mxb;
+-      sect_in = atomic_xchg(&device->rs_sect_in, 0);
++      sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
+       device->rs_in_flight -= sect_in;
+       rcu_read_lock();
+@@ -1662,8 +1664,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
+       struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+       struct fifo_buffer *plan;
+-      atomic_set(&device->rs_sect_in, 0);
+-      atomic_set(&device->rs_sect_ev, 0);
++      atomic_set_unchecked(&device->rs_sect_in, 0);
++      atomic_set_unchecked(&device->rs_sect_ev, 0);
+       device->rs_in_flight = 0;
+       device->rs_last_events =
+               (int)part_stat_read(&disk->part0, sectors[0]) +
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index e3d8e4c..4198ed8 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -961,6 +961,10 @@ static void empty(void)
+ {
+ }
++static void empty2(int i)
++{
++}
++
+ static void (*floppy_work_fn)(void);
+ static void floppy_work_workfn(struct work_struct *work)
+@@ -1953,14 +1957,14 @@ static const struct cont_t wakeup_cont = {
+       .interrupt      = empty,
+       .redo           = do_wakeup,
+       .error          = empty,
+-      .done           = (done_f)empty
++      .done           = empty2
+ };
+ static const struct cont_t intr_cont = {
+       .interrupt      = empty,
+       .redo           = process_fd_request,
+       .error          = empty,
+-      .done           = (done_f)empty
++      .done           = empty2
+ };
+ static int wait_til_done(void (*handler)(void), bool interruptible)
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 90fa4ac..8328db6 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -109,7 +109,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
+ static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+ {
+-      return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
++      return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
+ }
+ /*
+@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
+               return -EROFS;
+       }
+       pd->settings.fp = ti.fp;
+-      pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
++      pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
+       if (ti.nwa_v) {
+               pd->nwa = be32_to_cpu(ti.next_writable);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 6c6519f..f5fff92 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -64,7 +64,7 @@
+  * If the counter is already at its maximum value returns
+  * -EINVAL without updating it.
+  */
+-static int atomic_inc_return_safe(atomic_t *v)
++static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
+ {
+       unsigned int counter;
+diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
+index e5565fb..71be10b4 100644
+--- a/drivers/block/smart1,2.h
++++ b/drivers/block/smart1,2.h
+@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
+ }
+ static struct access_method smart4_access = {
+-      smart4_submit_command,
+-      smart4_intr_mask,
+-      smart4_fifo_full,
+-      smart4_intr_pending,
+-      smart4_completed,
++      .submit_command = smart4_submit_command,
++      .set_intr_mask = smart4_intr_mask,
++      .fifo_full = smart4_fifo_full,
++      .intr_pending = smart4_intr_pending,
++      .command_completed = smart4_completed,
+ };
+ /*
+@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
+ }
+ static struct access_method smart2_access = {
+-      smart2_submit_command,
+-      smart2_intr_mask,
+-      smart2_fifo_full,
+-      smart2_intr_pending,
+-      smart2_completed,
++      .submit_command = smart2_submit_command,
++      .set_intr_mask = smart2_intr_mask,
++      .fifo_full = smart2_fifo_full,
++      .intr_pending = smart2_intr_pending,
++      .command_completed = smart2_completed,
+ };
+ /*
+@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
+ }
+ static struct access_method smart2e_access = {
+-      smart2e_submit_command,
+-      smart2e_intr_mask,
+-      smart2e_fifo_full,
+-      smart2e_intr_pending,
+-      smart2e_completed,
++      .submit_command = smart2e_submit_command,
++      .set_intr_mask = smart2e_intr_mask,
++      .fifo_full = smart2e_fifo_full,
++      .intr_pending = smart2e_intr_pending,
++      .command_completed = smart2e_completed,
+ };
+ /*
+@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
+ }
+ static struct access_method smart1_access = {
+-      smart1_submit_command,
+-      smart1_intr_mask,
+-      smart1_fifo_full,
+-      smart1_intr_pending,
+-      smart1_completed,
++      .submit_command = smart1_submit_command,
++      .set_intr_mask = smart1_intr_mask,
++      .fifo_full = smart1_fifo_full,
++      .intr_pending = smart1_intr_pending,
++      .command_completed = smart1_completed,
+ };
+diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
+index 485281b..ab20198 100644
+--- a/drivers/bluetooth/btwilink.c
++++ b/drivers/bluetooth/btwilink.c
+@@ -275,7 +275,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ static int bt_ti_probe(struct platform_device *pdev)
+ {
+-      static struct ti_st *hst;
++      struct ti_st *hst;
+       struct hci_dev *hdev;
+       int err;
+diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
+index ffa7c9d..a68b53e 100644
+--- a/drivers/bus/arm-cci.c
++++ b/drivers/bus/arm-cci.c
+@@ -1475,8 +1475,10 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
+       char *name = model->name;
+       u32 num_cntrs;
+-      pmu_event_attr_group.attrs = model->event_attrs;
+-      pmu_format_attr_group.attrs = model->format_attrs;
++      pax_open_kernel();
++      const_cast(pmu_event_attr_group.attrs) = model->event_attrs;
++      const_cast(pmu_format_attr_group.attrs) = model->format_attrs;
++      pax_close_kernel();
+       cci_pmu->pmu = (struct pmu) {
+               .name           = cci_pmu->model->name,
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 5d475b3..e9076c0 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
+       ENSURE(reset, CDC_RESET);
+       ENSURE(generic_packet, CDC_GENERIC_PACKET);
+       cdi->mc_flags = 0;
+-      cdo->n_minors = 0;
+       cdi->options = CDO_USE_FFLAGS;
+       if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
+@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
+       else
+               cdi->cdda_method = CDDA_OLD;
+-      if (!cdo->generic_packet)
+-              cdo->generic_packet = cdrom_dummy_generic_packet;
++      if (!cdo->generic_packet) {
++              pax_open_kernel();
++              const_cast(cdo->generic_packet) = cdrom_dummy_generic_packet;
++              pax_close_kernel();
++      }
+       cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
+       mutex_lock(&cdrom_mutex);
+@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
+       if (cdi->exit)
+               cdi->exit(cdi);
+-      cdi->ops->n_minors--;
+       cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+ }
+@@ -2137,7 +2138,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+        */
+       nr = nframes;
+       do {
+-              cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
++              cgc.buffer = kcalloc(nr, CD_FRAMESIZE_RAW, GFP_KERNEL);
+               if (cgc.buffer)
+                       break;
+@@ -3441,7 +3442,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
+       struct cdrom_device_info *cdi;
+       int ret;
+-      ret = scnprintf(info + *pos, max_size - *pos, header);
++      ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
+       if (!ret)
+               return 1;
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 584bc31..e64a12c 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
+       .audio_ioctl            = gdrom_audio_ioctl,
+       .capability             = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
+                                 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
+-      .n_minors               = 1,
+ };
+ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index dcc0973..8d34c88 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -17,7 +17,8 @@ config DEVMEM
+ config DEVKMEM
+       bool "/dev/kmem virtual device support"
+-      default y
++      default n
++      depends on !GRKERNSEC_KMEM
+       help
+         Say Y here if you want to support the /dev/kmem device. The
+         /dev/kmem device is rarely used, but can be used for certain
+@@ -573,6 +574,7 @@ config TELCLOCK
+ config DEVPORT
+       bool
+       depends on ISA || PCI
++      depends on !GRKERNSEC_KMEM
+       default y
+ source "drivers/s390/char/Kconfig"
+diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
+index a48e05b..6bac831 100644
+--- a/drivers/char/agp/compat_ioctl.c
++++ b/drivers/char/agp/compat_ioctl.c
+@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
+                       return -ENOMEM;
+               }
+-              if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
++              if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
+                                  sizeof(*usegment) * ureserve.seg_count)) {
+                       kfree(usegment);
+                       kfree(ksegment);
+diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
+index 0f64d14..4cf4d6b 100644
+--- a/drivers/char/agp/frontend.c
++++ b/drivers/char/agp/frontend.c
+@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+       if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+               return -EFAULT;
+-      if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++      if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+               return -EFAULT;
+       client = agp_find_client_by_pid(reserve.pid);
+@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+               if (segment == NULL)
+                       return -ENOMEM;
+-              if (copy_from_user(segment, (void __user *) reserve.seg_list,
++              if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
+                                  sizeof(struct agp_segment) * reserve.seg_count)) {
+                       kfree(segment);
+                       return -EFAULT;
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 4431129..3983729 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -1418,8 +1418,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ }
+ EXPORT_SYMBOL(intel_gmch_probe);
+-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+-                 phys_addr_t *mappable_base, u64 *mappable_end)
++void intel_gtt_get(u64 *gtt_total, u64 *stolen_size,
++                 u64 *mappable_base, u64 *mappable_end)
+ {
+       *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+       *stolen_size = intel_private.stolen_size;
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index be54e53..50272fe 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -574,7 +574,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
+ }
+ static int
+-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+                 struct hpet_info *info)
+ {
+       struct hpet_timer __iomem *timer;
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index d8619998..445da20 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -436,7 +436,7 @@ struct ipmi_smi {
+       struct proc_dir_entry *proc_dir;
+       char                  proc_dir_name[10];
+-      atomic_t stats[IPMI_NUM_STATS];
++      atomic_unchecked_t stats[IPMI_NUM_STATS];
+       /*
+        * run_to_completion duplicate of smb_info, smi_info
+@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
+ static DEFINE_MUTEX(smi_watchers_mutex);
+ #define ipmi_inc_stat(intf, stat) \
+-      atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++      atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+-      ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
++      ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
+ static const char * const addr_src_to_str[] = {
+       "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
+@@ -2835,7 +2835,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+       INIT_LIST_HEAD(&intf->cmd_rcvrs);
+       init_waitqueue_head(&intf->waitq);
+       for (i = 0; i < IPMI_NUM_STATS; i++)
+-              atomic_set(&intf->stats[i], 0);
++              atomic_set_unchecked(&intf->stats[i], 0);
+       intf->proc_dir = NULL;
+diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
+index 9f2e3be..676c910 100644
+--- a/drivers/char/ipmi/ipmi_poweroff.c
++++ b/drivers/char/ipmi/ipmi_poweroff.c
+@@ -66,7 +66,7 @@ static void (*specific_poweroff_func)(ipmi_user_t user);
+ /* Holds the old poweroff function so we can restore it on removal. */
+ static void (*old_poweroff_func)(void);
+-static int set_param_ifnum(const char *val, struct kernel_param *kp)
++static int set_param_ifnum(const char *val, const struct kernel_param *kp)
+ {
+       int rv = param_set_int(val, kp);
+       if (rv)
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index a112c01..5bd9d25 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -302,7 +302,7 @@ struct smi_info {
+       unsigned char slave_addr;
+       /* Counters and things for the proc filesystem. */
+-      atomic_t stats[SI_NUM_STATS];
++      atomic_unchecked_t stats[SI_NUM_STATS];
+       struct task_struct *thread;
+@@ -311,9 +311,9 @@ struct smi_info {
+ };
+ #define smi_inc_stat(smi, stat) \
+-      atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++      atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+-      ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
++      ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
+ #define SI_MAX_PARMS 4
+@@ -1344,7 +1344,7 @@ static unsigned int num_slave_addrs;
+ #define IPMI_MEM_ADDR_SPACE 1
+ static const char * const addr_space_to_str[] = { "i/o", "mem" };
+-static int hotmod_handler(const char *val, struct kernel_param *kp);
++static int hotmod_handler(const char *val, const struct kernel_param *kp);
+ module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+ MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+@@ -1814,7 +1814,7 @@ static struct smi_info *smi_info_alloc(void)
+       return info;
+ }
+-static int hotmod_handler(const char *val, struct kernel_param *kp)
++static int hotmod_handler(const char *val, const struct kernel_param *kp)
+ {
+       char *str = kstrdup(val, GFP_KERNEL);
+       int  rv;
+@@ -3578,7 +3578,7 @@ static int try_smi_init(struct smi_info *new_smi)
+       atomic_set(&new_smi->req_events, 0);
+       new_smi->run_to_completion = false;
+       for (i = 0; i < SI_NUM_STATS; i++)
+-              atomic_set(&new_smi->stats[i], 0);
++              atomic_set_unchecked(&new_smi->stats[i], 0);
+       new_smi->interrupt_disabled = true;
+       atomic_set(&new_smi->need_watch, 0);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 5673fff..3ab2908 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -284,17 +284,17 @@ struct ssif_info {
+       unsigned int  multi_len;
+       unsigned int  multi_pos;
+-      atomic_t stats[SSIF_NUM_STATS];
++      atomic_unchecked_t stats[SSIF_NUM_STATS];
+ };
+ #define ssif_inc_stat(ssif, stat) \
+-      atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
++      atomic_inc_unchecked(&(ssif)->stats[SSIF_STAT_ ## stat])
+ #define ssif_get_stat(ssif, stat) \
+-      ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
++      ((unsigned int) atomic_read_unchecked(&(ssif)->stats[SSIF_STAT_ ## stat]))
+ static bool initialized;
+-static atomic_t next_intf = ATOMIC_INIT(0);
++static atomic_unchecked_t next_intf = ATOMIC_INIT(0);
+ static void return_hosed_msg(struct ssif_info *ssif_info,
+                            struct ipmi_smi_msg *msg);
+@@ -1608,7 +1608,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+       }
+  found:
+-      ssif_info->intf_num = atomic_inc_return(&next_intf);
++      ssif_info->intf_num = atomic_inc_return_unchecked(&next_intf);
+       if (ssif_dbg_probe) {
+               pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
+@@ -1622,7 +1622,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+       ssif_info->retry_timer.function = retry_timeout;
+       for (i = 0; i < SSIF_NUM_STATS; i++)
+-              atomic_set(&ssif_info->stats[i], 0);
++              atomic_set_unchecked(&ssif_info->stats[i], 0);
+       if (ssif_info->supports_pec)
+               ssif_info->client->flags |= I2C_CLIENT_PEC;
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index a33163d..43c1578 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -18,6 +18,7 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <linux/highmem.h>
+@@ -37,6 +38,10 @@
+ #define DEVPORT_MINOR 4
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++extern const struct file_operations grsec_fops;
++#endif
++
+ static inline unsigned long size_inside_page(unsigned long start,
+                                            unsigned long size)
+ {
+@@ -67,13 +72,22 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+       u64 cursor = from;
+       while (cursor < to) {
+-              if (!devmem_is_allowed(pfn))
++              if (!devmem_is_allowed(pfn)) {
++#ifdef CONFIG_GRKERNSEC_KMEM
++                      gr_handle_mem_readwrite(from, to);
++#endif
+                       return 0;
++              }
+               cursor += PAGE_SIZE;
+               pfn++;
+       }
+       return 1;
+ }
++#elif defined(CONFIG_GRKERNSEC_KMEM)
++static inline int range_is_allowed(unsigned long pfn, unsigned long size)
++{
++      return 0;
++}
+ #else
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+@@ -98,6 +112,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+       phys_addr_t p = *ppos;
+       ssize_t read, sz;
+       void *ptr;
++      char *temp;
+       if (p != *ppos)
+               return 0;
+@@ -120,13 +135,19 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+       }
+ #endif
++      temp = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY);
++      if (!temp)
++              return -ENOMEM;
++
+       while (count > 0) {
+               unsigned long remaining;
+               sz = size_inside_page(p, count);
+-              if (!range_is_allowed(p >> PAGE_SHIFT, count))
++              if (!range_is_allowed(p >> PAGE_SHIFT, count)) {
++                      kfree(temp);
+                       return -EPERM;
++              }
+               /*
+                * On ia64 if a page has been mapped somewhere as uncached, then
+@@ -134,13 +155,17 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+                * corruption may occur.
+                */
+               ptr = xlate_dev_mem_ptr(p);
+-              if (!ptr)
++              if (!ptr || probe_kernel_read(temp, ptr, sz)) {
++                      kfree(temp);
+                       return -EFAULT;
++              }
+-              remaining = copy_to_user(buf, ptr, sz);
++              remaining = copy_to_user(buf, temp, sz);
+               unxlate_dev_mem_ptr(p, ptr);
+-              if (remaining)
++              if (remaining) {
++                      kfree(temp);
+                       return -EFAULT;
++              }
+               buf += sz;
+               p += sz;
+@@ -148,6 +173,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+               read += sz;
+       }
++      kfree(temp);
++
+       *ppos += read;
+       return read;
+ }
+@@ -383,6 +410,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+       read = 0;
+       if (p < (unsigned long) high_memory) {
++              char *temp;
++
+               low_count = count;
+               if (count > (unsigned long)high_memory - p)
+                       low_count = (unsigned long)high_memory - p;
+@@ -400,6 +429,11 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+                       count -= sz;
+               }
+ #endif
++
++              temp = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY);
++              if (!temp)
++                      return -ENOMEM;
++
+               while (low_count > 0) {
+                       sz = size_inside_page(p, low_count);
+@@ -410,14 +444,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+                        */
+                       kbuf = xlate_dev_kmem_ptr((void *)p);
+-                      if (copy_to_user(buf, kbuf, sz))
++                      if (probe_kernel_read(temp, kbuf, sz) || copy_to_user(buf, temp, sz)) {
++                              kfree(temp);
+                               return -EFAULT;
++                      }
+                       buf += sz;
+                       p += sz;
+                       read += sz;
+                       low_count -= sz;
+                       count -= sz;
+               }
++
++              kfree(temp);
+       }
+       if (count > 0) {
+@@ -822,6 +860,9 @@ static const struct memdev {
+ #ifdef CONFIG_PRINTK
+       [11] = { "kmsg", 0644, &kmsg_fops, 0 },
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++      [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
++#endif
+ };
+ static int memory_open(struct inode *inode, struct file *filp)
+@@ -883,7 +924,7 @@ static int __init chr_dev_init(void)
+                       continue;
+               device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+-                            NULL, devlist[minor].name);
++                            NULL, "%s", devlist[minor].name);
+       }
+       return tty_init();
+diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
+index 678fa97..5598cef 100644
+--- a/drivers/char/nvram.c
++++ b/drivers/char/nvram.c
+@@ -235,7 +235,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
+       spin_unlock_irq(&rtc_lock);
+-      if (copy_to_user(buf, contents, tmp - contents))
++      if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
+               return -EFAULT;
+       *ppos = i;
+diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
+index d28922d..3c343d6 100644
+--- a/drivers/char/pcmcia/synclink_cs.c
++++ b/drivers/char/pcmcia/synclink_cs.c
+@@ -2333,7 +2333,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
+-                       __FILE__, __LINE__, info->device_name, port->count);
++                       __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
+       if (tty_port_close_start(port, tty, filp) == 0)
+               goto cleanup;
+@@ -2351,7 +2351,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
+ cleanup:
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
+-                      tty->driver->name, port->count);
++                      tty->driver->name, atomic_read(&port->count));
+ }
+ /* Wait until the transmitter is empty.
+@@ -2493,7 +2493,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
+-                       __FILE__, __LINE__, tty->driver->name, port->count);
++                       __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
+       port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -2504,11 +2504,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
+               goto cleanup;
+       }
+       spin_lock(&port->lock);
+-      port->count++;
++      atomic_inc(&port->count);
+       spin_unlock(&port->lock);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (port->count == 1) {
++      if (atomic_read(&port->count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info, tty);
+               if (retval < 0)
+@@ -3897,7 +3897,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       switch (encoding)
+@@ -4001,7 +4001,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -4091,7 +4091,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 3efb3bf0..2541398 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -292,9 +292,6 @@
+ /*
+  * To allow fractional bits to be tracked, the entropy_count field is
+  * denominated in units of 1/8th bits.
+- *
+- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+- * credit_entropy_bits() needs to be 64 bits wide.
+  */
+ #define ENTROPY_SHIFT 3
+ #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+@@ -479,8 +476,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+ static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
+ static void push_to_pool(struct work_struct *work);
+-static __u32 input_pool_data[INPUT_POOL_WORDS];
+-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
++static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
++static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
+ static struct entropy_store input_pool = {
+       .poolinfo = &poolinfo_table[0],
+@@ -680,7 +677,7 @@ retry:
+               /* The +2 corresponds to the /4 in the denominator */
+               do {
+-                      unsigned int anfrac = min(pnfrac, pool_size/2);
++                      u64 anfrac = min(pnfrac, pool_size/2);
+                       unsigned int add =
+                               ((pool_size - entropy_count)*anfrac*3) >> s;
+@@ -1476,7 +1473,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+               extract_buf(r, tmp);
+               i = min_t(int, nbytes, EXTRACT_SIZE);
+-              if (copy_to_user(buf, tmp, i)) {
++              if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+                       ret = -EFAULT;
+                       break;
+               }
+@@ -1926,7 +1923,7 @@ static char sysctl_bootid[16];
+ static int proc_do_uuid(struct ctl_table *table, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table fake_table;
++      ctl_table_no_const fake_table;
+       unsigned char buf[64], tmp_uuid[16], *uuid;
+       uuid = table->data;
+@@ -1956,7 +1953,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
+ static int proc_do_entropy(struct ctl_table *table, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table fake_table;
++      ctl_table_no_const fake_table;
+       int entropy_count;
+       entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
+index e496dae..3db53b6 100644
+--- a/drivers/char/sonypi.c
++++ b/drivers/char/sonypi.c
+@@ -54,6 +54,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
++#include <asm/local.h>
+ #include <linux/sonypi.h>
+@@ -490,7 +491,7 @@ static struct sonypi_device {
+       spinlock_t fifo_lock;
+       wait_queue_head_t fifo_proc_list;
+       struct fasync_struct *fifo_async;
+-      int open_count;
++      local_t open_count;
+       int model;
+       struct input_dev *input_jog_dev;
+       struct input_dev *input_key_dev;
+@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+       mutex_lock(&sonypi_device.lock);
+-      sonypi_device.open_count--;
++      local_dec(&sonypi_device.open_count);
+       mutex_unlock(&sonypi_device.lock);
+       return 0;
+ }
+@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
+ {
+       mutex_lock(&sonypi_device.lock);
+       /* Flush input queue on first open */
+-      if (!sonypi_device.open_count)
++      if (!local_read(&sonypi_device.open_count))
+               kfifo_reset(&sonypi_device.fifo);
+-      sonypi_device.open_count++;
++      local_inc(&sonypi_device.open_count);
+       mutex_unlock(&sonypi_device.lock);
+       return 0;
+@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
+ static struct platform_device *sonypi_platform_device;
+-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
++static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
+       {
+               .ident = "Sony Vaio",
+               .matches = {
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index e595013..9653af2 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -196,6 +196,11 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_alloc);
++static void tpm_put_device(void *dev)
++{
++      put_device(dev);
++}
++
+ /**
+  * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
+  * @pdev: parent device to which the chip is associated
+@@ -213,9 +218,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+       if (IS_ERR(chip))
+               return chip;
+-      rc = devm_add_action_or_reset(pdev,
+-                                    (void (*)(void *)) put_device,
+-                                    &chip->dev);
++      rc = devm_add_action_or_reset(pdev, tpm_put_device, &chip->dev);
+       if (rc)
+               return ERR_PTR(rc);
+diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
+index 565a947..dcdc06e 100644
+--- a/drivers/char/tpm/tpm_acpi.c
++++ b/drivers/char/tpm/tpm_acpi.c
+@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
+       virt = acpi_os_map_iomem(start, len);
+       if (!virt) {
+               kfree(log->bios_event_log);
++              log->bios_event_log = NULL;
+               printk("%s: ERROR - Unable to map memory\n", __func__);
+               return -EIO;
+       }
+-      memcpy_fromio(log->bios_event_log, virt, len);
++      memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
+       acpi_os_unmap_iomem(virt, len);
+       return 0;
+diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
+index e722886..78a48b9 100644
+--- a/drivers/char/tpm/tpm_eventlog.c
++++ b/drivers/char/tpm/tpm_eventlog.c
+@@ -108,8 +108,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+       converted_event_type = do_endian_conversion(event->event_type);
+       if (((converted_event_type == 0) && (converted_event_size == 0))
+-          || ((addr + sizeof(struct tcpa_event) + converted_event_size)
+-              >= limit))
++          || (converted_event_size >= limit - addr - sizeof(struct tcpa_event)))
+               return NULL;
+       return addr;
+@@ -138,7 +137,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+       converted_event_type = do_endian_conversion(event->event_type);
+       if (((converted_event_type == 0) && (converted_event_size == 0)) ||
+-          ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit))
++          (converted_event_size >= limit - v - sizeof(struct tcpa_event)))
+               return NULL;
+       (*pos)++;
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 5da47e26..fbfa419 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -692,11 +692,11 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
+       if (to_user) {
+               ssize_t ret;
+-              ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
++              ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
+               if (ret)
+                       return -EFAULT;
+       } else {
+-              memcpy((__force char *)out_buf, buf->buf + buf->offset,
++              memcpy((__force_kernel char *)out_buf, buf->buf + buf->offset,
+                      out_count);
+       }
+@@ -1171,7 +1171,7 @@ static int get_chars(u32 vtermno, char *buf, int count)
+       /* If we don't have an input queue yet, we can't get input. */
+       BUG_ON(!port->in_vq);
+-      return fill_readbuf(port, (__force char __user *)buf, count, false);
++      return fill_readbuf(port, (char __force_user *)buf, count, false);
+ }
+ static void resize_console(struct port *port)
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 0fc71cb..225b0c0 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1145,8 +1145,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
+ };
+ static struct clk *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+-                                      const struct bcm2835_pll_data *data)
++                                      const void *_data)
+ {
++      const struct bcm2835_pll_data *data = _data;
+       struct bcm2835_pll *pll;
+       struct clk_init_data init;
+@@ -1172,8 +1173,9 @@ static struct clk *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+ static struct clk *
+ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
+-                           const struct bcm2835_pll_divider_data *data)
++                           const void *_data)
+ {
++      const struct bcm2835_pll_divider_data *data = _data;
+       struct bcm2835_pll_divider *divider;
+       struct clk_init_data init;
+       struct clk *clk;
+@@ -1231,8 +1233,9 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
+ }
+ static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
+-                                        const struct bcm2835_clock_data *data)
++                                        const void *_data)
+ {
++      const struct bcm2835_clock_data *data = _data;
+       struct bcm2835_clock *clock;
+       struct clk_init_data init;
+       const char *parents[1 << CM_SRC_BITS];
+@@ -1274,8 +1277,10 @@ static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
+ }
+ static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
+-                                       const struct bcm2835_gate_data *data)
++                                       const void *_data)
+ {
++      const struct bcm2835_gate_data *data = _data;
++
+       return clk_register_gate(cprman->dev, data->name, data->parent,
+                                CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+                                cprman->regs + data->ctl_reg,
+@@ -1290,8 +1295,7 @@ struct bcm2835_clk_desc {
+ };
+ /* assignment helper macros for different clock types */
+-#define _REGISTER(f, ...) { .clk_register = (bcm2835_clk_register)f, \
+-                          .data = __VA_ARGS__ }
++#define _REGISTER(f, ...) { .clk_register = f, .data = __VA_ARGS__ }
+ #define REGISTER_PLL(...)     _REGISTER(&bcm2835_register_pll,        \
+                                         &(struct bcm2835_pll_data)    \
+                                         {__VA_ARGS__})
+diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
+index 00269de..3e17e60 100644
+--- a/drivers/clk/clk-composite.c
++++ b/drivers/clk/clk-composite.c
+@@ -221,7 +221,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
+       struct clk_hw *hw;
+       struct clk_init_data init;
+       struct clk_composite *composite;
+-      struct clk_ops *clk_composite_ops;
++      clk_ops_no_const *clk_composite_ops;
+       int ret;
+       composite = kzalloc(sizeof(*composite), GFP_KERNEL);
+diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
+index c2d5727..1a305db 100644
+--- a/drivers/clk/socfpga/clk-gate-a10.c
++++ b/drivers/clk/socfpga/clk-gate-a10.c
+@@ -19,6 +19,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+ #include <linux/regmap.h>
++#include <asm/pgtable.h>
+ #include "clk.h"
+@@ -97,7 +98,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
+       return 0;
+ }
+-static struct clk_ops gateclk_ops = {
++static clk_ops_no_const gateclk_ops __read_only = {
+       .prepare = socfpga_clk_prepare,
+       .recalc_rate = socfpga_gate_clk_recalc_rate,
+ };
+@@ -128,8 +129,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
+               socfpga_clk->hw.reg = clk_mgr_a10_base_addr + clk_gate[0];
+               socfpga_clk->hw.bit_idx = clk_gate[1];
+-              gateclk_ops.enable = clk_gate_ops.enable;
+-              gateclk_ops.disable = clk_gate_ops.disable;
++              pax_open_kernel();
++              const_cast(gateclk_ops.enable) = clk_gate_ops.enable;
++              const_cast(gateclk_ops.disable) = clk_gate_ops.disable;
++              pax_close_kernel();
+       }
+       rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
+index aa7a6e6..1e9b426 100644
+--- a/drivers/clk/socfpga/clk-gate.c
++++ b/drivers/clk/socfpga/clk-gate.c
+@@ -21,6 +21,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+ #include <linux/regmap.h>
++#include <asm/pgtable.h>
+ #include "clk.h"
+@@ -169,7 +170,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
+       return 0;
+ }
+-static struct clk_ops gateclk_ops = {
++static clk_ops_no_const gateclk_ops __read_only = {
+       .prepare = socfpga_clk_prepare,
+       .recalc_rate = socfpga_clk_recalc_rate,
+       .get_parent = socfpga_clk_get_parent,
+@@ -202,8 +203,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
+               socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
+               socfpga_clk->hw.bit_idx = clk_gate[1];
+-              gateclk_ops.enable = clk_gate_ops.enable;
+-              gateclk_ops.disable = clk_gate_ops.disable;
++              pax_open_kernel();
++              const_cast(gateclk_ops.enable) = clk_gate_ops.enable;
++              const_cast(gateclk_ops.disable) = clk_gate_ops.disable;
++              pax_close_kernel();
+       }
+       rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
+index 35fabe1..d847c53 100644
+--- a/drivers/clk/socfpga/clk-pll-a10.c
++++ b/drivers/clk/socfpga/clk-pll-a10.c
+@@ -18,6 +18,7 @@
+ #include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <asm/pgtable.h>
+ #include "clk.h"
+@@ -69,7 +70,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
+               CLK_MGR_PLL_CLK_SRC_MASK;
+ }
+-static struct clk_ops clk_pll_ops = {
++static clk_ops_no_const clk_pll_ops __read_only = {
+       .recalc_rate = clk_pll_recalc_rate,
+       .get_parent = clk_pll_get_parent,
+ };
+@@ -112,8 +113,10 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
+       pll_clk->hw.hw.init = &init;
+       pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
+-      clk_pll_ops.enable = clk_gate_ops.enable;
+-      clk_pll_ops.disable = clk_gate_ops.disable;
++      pax_open_kernel();
++      const_cast(clk_pll_ops.enable) = clk_gate_ops.enable;
++      const_cast(clk_pll_ops.disable) = clk_gate_ops.disable;
++      pax_close_kernel();
+       clk = clk_register(NULL, &pll_clk->hw.hw);
+       if (WARN_ON(IS_ERR(clk))) {
+diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
+index c7f4631..8d1b7d0 100644
+--- a/drivers/clk/socfpga/clk-pll.c
++++ b/drivers/clk/socfpga/clk-pll.c
+@@ -20,6 +20,7 @@
+ #include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <asm/pgtable.h>
+ #include "clk.h"
+@@ -75,7 +76,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
+                       CLK_MGR_PLL_CLK_SRC_MASK;
+ }
+-static struct clk_ops clk_pll_ops = {
++static clk_ops_no_const clk_pll_ops __read_only = {
+       .recalc_rate = clk_pll_recalc_rate,
+       .get_parent = clk_pll_get_parent,
+ };
+@@ -114,8 +115,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
+       pll_clk->hw.hw.init = &init;
+       pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
+-      clk_pll_ops.enable = clk_gate_ops.enable;
+-      clk_pll_ops.disable = clk_gate_ops.disable;
++      pax_open_kernel();
++      const_cast(clk_pll_ops.enable) = clk_gate_ops.enable;
++      const_cast(clk_pll_ops.disable) = clk_gate_ops.disable;
++      pax_close_kernel();
+       clk = clk_register(NULL, &pll_clk->hw.hw);
+       if (WARN_ON(IS_ERR(clk))) {
+diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
+index 255cafb..7b41c3b 100644
+--- a/drivers/clk/ti/adpll.c
++++ b/drivers/clk/ti/adpll.c
+@@ -589,7 +589,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d,
+ {
+       struct ti_adpll_clkout_data *co;
+       struct clk_init_data init;
+-      struct clk_ops *ops;
++      clk_ops_no_const *ops;
+       const char *parent_names[2];
+       const char *child_name;
+       struct clk *clock;
+diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
+index 5fcf247..446780a 100644
+--- a/drivers/clk/ti/clk.c
++++ b/drivers/clk/ti/clk.c
+@@ -25,6 +25,8 @@
+ #include <linux/regmap.h>
+ #include <linux/bootmem.h>
++#include <asm/pgtable.h>
++
+ #include "clock.h"
+ #undef pr_fmt
+@@ -84,8 +86,10 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
+       }
+       ti_clk_ll_ops = ops;
+-      ops->clk_readl = clk_memmap_readl;
+-      ops->clk_writel = clk_memmap_writel;
++      pax_open_kernel();
++      const_cast(ops->clk_readl) = clk_memmap_readl;
++      const_cast(ops->clk_writel) = clk_memmap_writel;
++      pax_close_kernel();
+       return 0;
+ }
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 297e912..d5661fb 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -694,8 +694,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+       data->acpi_perf_cpu = cpu;
+       policy->driver_data = data;
+-      if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+-              acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++      if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
++              pax_open_kernel();
++              const_cast(acpi_cpufreq_driver.flags) |= CPUFREQ_CONST_LOOPS;
++              pax_close_kernel();
++      }
+       result = acpi_processor_register_performance(perf, cpu);
+       if (result)
+@@ -833,7 +836,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+               policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+               break;
+       case ACPI_ADR_SPACE_FIXED_HARDWARE:
+-              acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
++              pax_open_kernel();
++              const_cast(acpi_cpufreq_driver.get) = get_cur_freq_on_cpu;
++              pax_close_kernel();
+               break;
+       default:
+               break;
+@@ -930,8 +935,10 @@ static void __init acpi_cpufreq_boost_init(void)
+               if (!msrs)
+                       return;
+-              acpi_cpufreq_driver.set_boost = set_boost;
+-              acpi_cpufreq_driver.boost_enabled = boost_state(0);
++              pax_open_kernel();
++              const_cast(acpi_cpufreq_driver.set_boost) = set_boost;
++              const_cast(acpi_cpufreq_driver.boost_enabled) = boost_state(0);
++              pax_close_kernel();
+               cpu_notifier_register_begin();
+diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
+index 3957de8..fe991bb 100644
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -366,7 +366,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
++      pax_open_kernel();
++      const_cast(dt_cpufreq_driver.driver_data) = dev_get_platdata(&pdev->dev);
++      pax_close_kernel();
+       ret = cpufreq_register_driver(&dt_cpufreq_driver);
+       if (ret)
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 3dd4884..6249a29 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -528,12 +528,12 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+  *                          SYSFS INTERFACE                          *
+  *********************************************************************/
+ static ssize_t show_boost(struct kobject *kobj,
+-                               struct attribute *attr, char *buf)
++                               struct kobj_attribute *attr, char *buf)
+ {
+       return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ }
+-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+                                 const char *buf, size_t count)
+ {
+       int ret, enable;
+@@ -2150,7 +2150,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+       read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+       mutex_lock(&cpufreq_governor_mutex);
+-      list_del(&governor->governor_list);
++      pax_list_del(&governor->governor_list);
+       mutex_unlock(&cpufreq_governor_mutex);
+       return;
+ }
+@@ -2350,7 +2350,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata cpufreq_cpu_notifier = {
++static struct notifier_block cpufreq_cpu_notifier = {
+       .notifier_call = cpufreq_cpu_callback,
+ };
+@@ -2392,13 +2392,17 @@ int cpufreq_boost_trigger_state(int state)
+               return 0;
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
+-      cpufreq_driver->boost_enabled = state;
++      pax_open_kernel();
++      const_cast(cpufreq_driver->boost_enabled) = state;
++      pax_close_kernel();
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+       ret = cpufreq_driver->set_boost(state);
+       if (ret) {
+               write_lock_irqsave(&cpufreq_driver_lock, flags);
+-              cpufreq_driver->boost_enabled = !state;
++              pax_open_kernel();
++              const_cast(cpufreq_driver->boost_enabled) = !state;
++              pax_close_kernel();
+               write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+               pr_err("%s: Cannot %s BOOST\n",
+@@ -2439,7 +2443,9 @@ int cpufreq_enable_boost_support(void)
+       if (cpufreq_boost_supported())
+               return 0;
+-      cpufreq_driver->set_boost = cpufreq_boost_set_sw;
++      pax_open_kernel();
++      const_cast(cpufreq_driver->set_boost) = cpufreq_boost_set_sw;
++      pax_close_kernel();
+       /* This will get removed on driver unregister */
+       return create_boost_sysfs_file();
+@@ -2496,8 +2502,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+       cpufreq_driver = driver_data;
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+-      if (driver_data->setpolicy)
+-              driver_data->flags |= CPUFREQ_CONST_LOOPS;
++      if (driver_data->setpolicy) {
++              pax_open_kernel();
++              const_cast(driver_data->flags) |= CPUFREQ_CONST_LOOPS;
++              pax_close_kernel();
++      }
+       if (cpufreq_boost_supported()) {
+               ret = create_boost_sysfs_file();
+diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
+index ef1037e..c832d36 100644
+--- a/drivers/cpufreq/cpufreq_governor.h
++++ b/drivers/cpufreq/cpufreq_governor.h
+@@ -171,7 +171,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+ struct od_ops {
+       unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
+                       unsigned int freq_next, unsigned int relation);
+-};
++} __no_const;
+ unsigned int dbs_update(struct cpufreq_policy *policy);
+ void od_register_powersave_bias_handler(unsigned int (*f)
+diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
+index 3a1f49f..42a478e 100644
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -408,7 +408,7 @@ static void od_start(struct cpufreq_policy *policy)
+       ondemand_powersave_bias_init(policy);
+ }
+-static struct od_ops od_ops = {
++static struct od_ops od_ops __read_only = {
+       .powersave_bias_target = generic_powersave_bias_target,
+ };
+@@ -464,14 +464,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
+               (struct cpufreq_policy *, unsigned int, unsigned int),
+               unsigned int powersave_bias)
+ {
+-      od_ops.powersave_bias_target = f;
++      pax_open_kernel();
++      const_cast(od_ops.powersave_bias_target) = f;
++      pax_close_kernel();
+       od_set_powersave_bias(powersave_bias);
+ }
+ EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
+ void od_unregister_powersave_bias_handler(void)
+ {
+-      od_ops.powersave_bias_target = generic_powersave_bias_target;
++      pax_open_kernel();
++      const_cast(od_ops.powersave_bias_target) = generic_powersave_bias_target;
++      pax_close_kernel();
+       od_set_powersave_bias(0);
+ }
+ EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index b46547e..79b533d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -275,13 +275,13 @@ struct pstate_funcs {
+ struct cpu_defaults {
+       struct pstate_adjust_policy pid_policy;
+       struct pstate_funcs funcs;
+-};
++} __do_const;
+ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
+ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
+ static struct pstate_adjust_policy pid_params __read_mostly;
+-static struct pstate_funcs pstate_funcs __read_mostly;
++static struct pstate_funcs *pstate_funcs __read_mostly;
+ static int hwp_active __read_mostly;
+ #ifdef CONFIG_ACPI
+@@ -650,13 +650,13 @@ static void __init intel_pstate_debug_expose_params(void)
+ /************************** sysfs begin ************************/
+ #define show_one(file_name, object)                                   \
+       static ssize_t show_##file_name                                 \
+-      (struct kobject *kobj, struct attribute *attr, char *buf)       \
++      (struct kobject *kobj, struct kobj_attribute *attr, char *buf)  \
+       {                                                               \
+               return sprintf(buf, "%u\n", limits->object);            \
+       }
+ static ssize_t show_turbo_pct(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total, no_turbo, turbo_pct;
+@@ -672,7 +672,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
+ }
+ static ssize_t show_num_pstates(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total;
+@@ -683,7 +683,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
+ }
+ static ssize_t show_no_turbo(struct kobject *kobj,
+-                           struct attribute *attr, char *buf)
++                           struct kobj_attribute *attr, char *buf)
+ {
+       ssize_t ret;
+@@ -696,7 +696,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
+       return ret;
+ }
+-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+                             const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -720,7 +720,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+       return count;
+ }
+-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -744,7 +744,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+       return count;
+ }
+-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1145,19 +1145,19 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+        * right CPU.
+        */
+       wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+-                    pstate_funcs.get_val(cpu, pstate));
++                    pstate_funcs->get_val(cpu, pstate));
+ }
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+-      cpu->pstate.min_pstate = pstate_funcs.get_min();
+-      cpu->pstate.max_pstate = pstate_funcs.get_max();
+-      cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
+-      cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+-      cpu->pstate.scaling = pstate_funcs.get_scaling();
++      cpu->pstate.min_pstate = pstate_funcs->get_min();
++      cpu->pstate.max_pstate = pstate_funcs->get_max();
++      cpu->pstate.max_pstate_physical = pstate_funcs->get_max_physical();
++      cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
++      cpu->pstate.scaling = pstate_funcs->get_scaling();
+-      if (pstate_funcs.get_vid)
+-              pstate_funcs.get_vid(cpu);
++      if (pstate_funcs->get_vid)
++              pstate_funcs->get_vid(cpu);
+       intel_pstate_set_min_pstate(cpu);
+ }
+@@ -1303,7 +1303,7 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+               return;
+       cpu->pstate.current_pstate = pstate;
+-      wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
++      wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs->get_val(cpu, pstate));
+ }
+ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+@@ -1313,7 +1313,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+       from = cpu->pstate.current_pstate;
+-      target_pstate = pstate_funcs.get_target_pstate(cpu);
++      target_pstate = pstate_funcs->get_target_pstate(cpu);
+       intel_pstate_update_pstate(cpu, target_pstate);
+@@ -1601,15 +1601,15 @@ static unsigned int force_load __initdata;
+ static int __init intel_pstate_msrs_not_valid(void)
+ {
+-      if (!pstate_funcs.get_max() ||
+-          !pstate_funcs.get_min() ||
+-          !pstate_funcs.get_turbo())
++      if (!pstate_funcs->get_max() ||
++          !pstate_funcs->get_min() ||
++          !pstate_funcs->get_turbo())
+               return -ENODEV;
+       return 0;
+ }
+-static void __init copy_pid_params(struct pstate_adjust_policy *policy)
++static void __init copy_pid_params(const struct pstate_adjust_policy *policy)
+ {
+       pid_params.sample_rate_ms = policy->sample_rate_ms;
+       pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
+@@ -1622,15 +1622,7 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
+ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
+ {
+-      pstate_funcs.get_max   = funcs->get_max;
+-      pstate_funcs.get_max_physical = funcs->get_max_physical;
+-      pstate_funcs.get_min   = funcs->get_min;
+-      pstate_funcs.get_turbo = funcs->get_turbo;
+-      pstate_funcs.get_scaling = funcs->get_scaling;
+-      pstate_funcs.get_val   = funcs->get_val;
+-      pstate_funcs.get_vid   = funcs->get_vid;
+-      pstate_funcs.get_target_pstate = funcs->get_target_pstate;
+-
++      pstate_funcs = funcs;
+ }
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
+index fd77812..97e3efe 100644
+--- a/drivers/cpufreq/p4-clockmod.c
++++ b/drivers/cpufreq/p4-clockmod.c
+@@ -130,10 +130,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+               case 0x0F: /* Core Duo */
+               case 0x16: /* Celeron Core */
+               case 0x1C: /* Atom */
+-                      p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++                      pax_open_kernel();
++                      const_cast(p4clockmod_driver.flags) |= CPUFREQ_CONST_LOOPS;
++                      pax_close_kernel();
+                       return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
+               case 0x0D: /* Pentium M (Dothan) */
+-                      p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++                      pax_open_kernel();
++                      const_cast(p4clockmod_driver.flags) |= CPUFREQ_CONST_LOOPS;
++                      pax_close_kernel();
+                       /* fall through */
+               case 0x09: /* Pentium M (Banias) */
+                       return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
+@@ -145,7 +149,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+       /* on P-4s, the TSC runs with constant frequency independent whether
+        * throttling is active or not. */
+-      p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++      pax_open_kernel();
++      const_cast(p4clockmod_driver.flags) |= CPUFREQ_CONST_LOOPS;
++      pax_close_kernel();
+       if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
+               pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
+diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
+index 9bb42ba..b01b4a2 100644
+--- a/drivers/cpufreq/sparc-us3-cpufreq.c
++++ b/drivers/cpufreq/sparc-us3-cpufreq.c
+@@ -18,14 +18,12 @@
+ #include <asm/head.h>
+ #include <asm/timer.h>
+-static struct cpufreq_driver *cpufreq_us3_driver;
+-
+ struct us3_freq_percpu_info {
+       struct cpufreq_frequency_table table[4];
+ };
+ /* Indexed by cpu number. */
+-static struct us3_freq_percpu_info *us3_freq_table;
++static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
+ /* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
+  * in the Safari config register.
+@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+ static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+ {
+-      if (cpufreq_us3_driver)
+-              us3_freq_target(policy, 0);
++      us3_freq_target(policy, 0);
+       return 0;
+ }
++static int __init us3_freq_init(void);
++static void __exit us3_freq_exit(void);
++
++static struct cpufreq_driver cpufreq_us3_driver = {
++      .init           = us3_freq_cpu_init,
++      .verify         = cpufreq_generic_frequency_table_verify,
++      .target_index   = us3_freq_target,
++      .get            = us3_freq_get,
++      .exit           = us3_freq_cpu_exit,
++      .name           = "UltraSPARC-III",
++
++};
++
+ static int __init us3_freq_init(void)
+ {
+       unsigned long manuf, impl, ver;
+-      int ret;
+       if (tlb_type != cheetah && tlb_type != cheetah_plus)
+               return -ENODEV;
+@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
+           (impl == CHEETAH_IMPL ||
+            impl == CHEETAH_PLUS_IMPL ||
+            impl == JAGUAR_IMPL ||
+-           impl == PANTHER_IMPL)) {
+-              struct cpufreq_driver *driver;
+-
+-              ret = -ENOMEM;
+-              driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+-              if (!driver)
+-                      goto err_out;
+-
+-              us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
+-                      GFP_KERNEL);
+-              if (!us3_freq_table)
+-                      goto err_out;
+-
+-              driver->init = us3_freq_cpu_init;
+-              driver->verify = cpufreq_generic_frequency_table_verify;
+-              driver->target_index = us3_freq_target;
+-              driver->get = us3_freq_get;
+-              driver->exit = us3_freq_cpu_exit;
+-              strcpy(driver->name, "UltraSPARC-III");
+-
+-              cpufreq_us3_driver = driver;
+-              ret = cpufreq_register_driver(driver);
+-              if (ret)
+-                      goto err_out;
+-
+-              return 0;
+-
+-err_out:
+-              if (driver) {
+-                      kfree(driver);
+-                      cpufreq_us3_driver = NULL;
+-              }
+-              kfree(us3_freq_table);
+-              us3_freq_table = NULL;
+-              return ret;
+-      }
++           impl == PANTHER_IMPL))
++              return cpufreq_register_driver(&cpufreq_us3_driver);
+       return -ENODEV;
+ }
+ static void __exit us3_freq_exit(void)
+ {
+-      if (cpufreq_us3_driver) {
+-              cpufreq_unregister_driver(cpufreq_us3_driver);
+-              kfree(cpufreq_us3_driver);
+-              cpufreq_us3_driver = NULL;
+-              kfree(us3_freq_table);
+-              us3_freq_table = NULL;
+-      }
++      cpufreq_unregister_driver(&cpufreq_us3_driver);
+ }
+ MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
+index 41bc539..e46a74d 100644
+--- a/drivers/cpufreq/speedstep-centrino.c
++++ b/drivers/cpufreq/speedstep-centrino.c
+@@ -352,8 +352,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
+           !cpu_has(cpu, X86_FEATURE_EST))
+               return -ENODEV;
+-      if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
+-              centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
++      if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
++              pax_open_kernel();
++              const_cast(centrino_driver.flags) |= CPUFREQ_CONST_LOOPS;
++              pax_close_kernel();
++      }
+       if (policy->cpu != 0)
+               return -ENODEV;
+diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
+index 389ade4..e328b5ce 100644
+--- a/drivers/cpuidle/driver.c
++++ b/drivers/cpuidle/driver.c
+@@ -193,7 +193,7 @@ static int poll_idle(struct cpuidle_device *dev,
+ static void poll_idle_init(struct cpuidle_driver *drv)
+ {
+-      struct cpuidle_state *state = &drv->states[0];
++      cpuidle_state_no_const *state = &drv->states[0];
+       snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+       snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
+index a5c111b..1113002 100644
+--- a/drivers/cpuidle/dt_idle_states.c
++++ b/drivers/cpuidle/dt_idle_states.c
+@@ -21,7 +21,7 @@
+ #include "dt_idle_states.h"
+-static int init_state_node(struct cpuidle_state *idle_state,
++static int init_state_node(cpuidle_state_no_const *idle_state,
+                          const struct of_device_id *matches,
+                          struct device_node *state_node)
+ {
+diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
+index fb9f511..213e6cc 100644
+--- a/drivers/cpuidle/governor.c
++++ b/drivers/cpuidle/governor.c
+@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
+       mutex_lock(&cpuidle_lock);
+       if (__cpuidle_find_governor(gov->name) == NULL) {
+               ret = 0;
+-              list_add_tail(&gov->governor_list, &cpuidle_governors);
++              pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
+               if (!cpuidle_curr_governor ||
+                   cpuidle_curr_governor->rating < gov->rating)
+                       cpuidle_switch_governor(gov);
+diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
+index 63bd5a4..eea2dff 100644
+--- a/drivers/cpuidle/governors/ladder.c
++++ b/drivers/cpuidle/governors/ladder.c
+@@ -173,6 +173,15 @@ static void ladder_reflect(struct cpuidle_device *dev, int index)
+ static struct cpuidle_governor ladder_governor = {
+       .name =         "ladder",
++      .rating =       25,
++      .enable =       ladder_enable_device,
++      .select =       ladder_select_state,
++      .reflect =      ladder_reflect,
++      .owner =        THIS_MODULE,
++};
++
++static struct cpuidle_governor ladder_governor_nohz = {
++      .name =         "ladder",
+       .rating =       10,
+       .enable =       ladder_enable_device,
+       .select =       ladder_select_state,
+@@ -190,10 +199,8 @@ static int __init init_ladder(void)
+        * governor is better so give it a higher rating than the menu
+        * governor.
+        */
+-      if (!tick_nohz_enabled)
+-              ladder_governor.rating = 25;
+-      return cpuidle_register_governor(&ladder_governor);
++      return cpuidle_register_governor(tick_nohz_enabled ? &ladder_governor_nohz : &ladder_governor);
+ }
+ postcore_initcall(init_ladder);
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 832a2c3..1794080 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
+       NULL
+ };
+-static struct attribute_group cpuidle_attr_group = {
++static attribute_group_no_const cpuidle_attr_group = {
+       .attrs = cpuidle_default_attrs,
+       .name = "cpuidle",
+ };
+diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
+index eee2c7e..268aa3e 100644
+--- a/drivers/crypto/hifn_795x.c
++++ b/drivers/crypto/hifn_795x.c
+@@ -37,7 +37,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
+ MODULE_PARM_DESC(hifn_pll_ref,
+                "PLL reference clock (pci[freq] or ext[freq], default ext)");
+-static atomic_t hifn_dev_number;
++static atomic_unchecked_t hifn_dev_number;
+ #define ACRYPTO_OP_DECRYPT    0
+ #define ACRYPTO_OP_ENCRYPT    1
+@@ -2483,7 +2483,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+               goto err_out_disable_pci_device;
+       snprintf(name, sizeof(name), "hifn%d",
+-                      atomic_inc_return(&hifn_dev_number) - 1);
++                      atomic_inc_return_unchecked(&hifn_dev_number) - 1);
+       err = pci_request_regions(pdev, name);
+       if (err)
+diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
+index 2839fcc..b40595a 100644
+--- a/drivers/crypto/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/qat/qat_common/adf_aer.c
+@@ -56,7 +56,7 @@
+ static struct workqueue_struct *device_reset_wq;
+ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+-                                         pci_channel_state_t state)
++                                         enum pci_channel_state state)
+ {
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
+index 9320ae1..4bf8e7e 100644
+--- a/drivers/crypto/qat/qat_common/adf_sriov.c
++++ b/drivers/crypto/qat/qat_common/adf_sriov.c
+@@ -93,7 +93,7 @@ static void adf_iov_send_resp(struct work_struct *work)
+       kfree(pf2vf_resp);
+ }
+-static void adf_vf2pf_bh_handler(void *data)
++static void adf_vf2pf_bh_handler(unsigned long data)
+ {
+       struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+       struct adf_pf2vf_resp *pf2vf_resp;
+@@ -126,7 +126,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+               vf_info->vf_nr = i;
+               tasklet_init(&vf_info->vf2pf_bh_tasklet,
+-                           (void *)adf_vf2pf_bh_handler,
++                           adf_vf2pf_bh_handler,
+                            (unsigned long)vf_info);
+               mutex_init(&vf_info->pf2vf_lock);
+               ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
+index bf99e11..a44361c 100644
+--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
+@@ -112,9 +112,9 @@ static void adf_dev_stop_async(struct work_struct *work)
+       kfree(stop_data);
+ }
+-static void adf_pf2vf_bh_handler(void *data)
++static void adf_pf2vf_bh_handler(unsigned long data)
+ {
+-      struct adf_accel_dev *accel_dev = data;
++      struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)data;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+@@ -183,7 +183,7 @@ err:
+ static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+ {
+       tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+-                   (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
++                   adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+       mutex_init(&accel_dev->vf.vf2pf_lock);
+       return 0;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 478006b..fd0efda 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -802,7 +802,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
+               goto err_out;
+       }
+-      list_add(&governor->node, &devfreq_governor_list);
++      pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
+       list_for_each_entry(devfreq, &devfreq_list, node) {
+               int ret = 0;
+@@ -890,7 +890,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
+               }
+       }
+-      list_del(&governor->node);
++      pax_list_del((struct list_head *)&governor->node);
+ err_out:
+       mutex_unlock(&devfreq_list_lock);
+diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
+index 9ef46e2..775fc75 100644
+--- a/drivers/devfreq/governor_passive.c
++++ b/drivers/devfreq/governor_passive.c
+@@ -151,7 +151,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
+       struct devfreq_passive_data *p_data
+                       = (struct devfreq_passive_data *)devfreq->data;
+       struct devfreq *parent = (struct devfreq *)p_data->parent;
+-      struct notifier_block *nb = &p_data->nb;
++      notifier_block_no_const *nb = &p_data->nb;
+       int ret = 0;
+       if (!parent)
+diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
+index b2374cd..4f1e32c 100644
+--- a/drivers/dma/qcom/hidma.c
++++ b/drivers/dma/qcom/hidma.c
+@@ -547,7 +547,7 @@ static ssize_t hidma_show_values(struct device *dev,
+ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
+                                   int mode)
+ {
+-      struct device_attribute *attrs;
++      device_attribute_no_const *attrs;
+       char *name_copy;
+       attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
+diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
+index d61f106..a23baa3 100644
+--- a/drivers/dma/qcom/hidma_mgmt_sys.c
++++ b/drivers/dma/qcom/hidma_mgmt_sys.c
+@@ -194,7 +194,7 @@ static ssize_t set_values_channel(struct kobject *kobj,
+ static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
+ {
+-      struct device_attribute *attrs;
++      device_attribute_no_const *attrs;
+       char *name_copy;
+       attrs = devm_kmalloc(&dev->pdev->dev,
+diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
+index 10fcaba..326f709 100644
+--- a/drivers/dma/sh/shdma-base.c
++++ b/drivers/dma/sh/shdma-base.c
+@@ -227,8 +227,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
+               schan->slave_id = -EINVAL;
+       }
+-      schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
+-                            sdev->desc_size, GFP_KERNEL);
++      schan->desc = kcalloc(sdev->desc_size,
++                            NR_DESCS_PER_CHANNEL, GFP_KERNEL);
+       if (!schan->desc) {
+               ret = -ENOMEM;
+               goto edescalloc;
+diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
+index c94ffab..82c11f0 100644
+--- a/drivers/dma/sh/shdmac.c
++++ b/drivers/dma/sh/shdmac.c
+@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
+       return ret;
+ }
+-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
++static struct notifier_block sh_dmae_nmi_notifier = {
+       .notifier_call  = sh_dmae_nmi_handler,
+       /* Run before NMI debug handler and KGDB */
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index a979003..773b7f0 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -468,9 +468,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+  */
+ int edac_device_alloc_index(void)
+ {
+-      static atomic_t device_indexes = ATOMIC_INIT(0);
++      static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
+-      return atomic_inc_return(&device_indexes) - 1;
++      return atomic_inc_return_unchecked(&device_indexes) - 1;
+ }
+ EXPORT_SYMBOL_GPL(edac_device_alloc_index);
+diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
+index 93da1a4..5e2c149 100644
+--- a/drivers/edac/edac_device_sysfs.c
++++ b/drivers/edac/edac_device_sysfs.c
+@@ -749,7 +749,7 @@ static int edac_device_add_main_sysfs_attributes(
+                */
+               while (sysfs_attrib->attr.name != NULL) {
+                       err = sysfs_create_file(&edac_dev->kobj,
+-                              (struct attribute*) sysfs_attrib);
++                              &sysfs_attrib->attr);
+                       if (err)
+                               goto err_out;
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 4e0f8e7..0eb9499 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -50,7 +50,7 @@ int edac_mc_get_poll_msec(void)
+       return edac_mc_poll_msec;
+ }
+-static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
++static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
+ {
+       unsigned long l;
+       int ret;
+@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
+ struct dev_ch_attribute {
+       struct device_attribute attr;
+       int channel;
+-};
++} __do_const;
+ #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
+       static struct dev_ch_attribute dev_attr_legacy_##_name = \
+diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
+index 5f8543b..46aa017 100644
+--- a/drivers/edac/edac_module.c
++++ b/drivers/edac/edac_module.c
+@@ -19,7 +19,7 @@
+ #ifdef CONFIG_EDAC_DEBUG
+-static int edac_set_debug_level(const char *buf, struct kernel_param *kp)
++static int edac_set_debug_level(const char *buf, const struct kernel_param *kp)
+ {
+       unsigned long val;
+       int ret;
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index 8f2f289..cbb0d7c 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -29,7 +29,7 @@
+ static DEFINE_MUTEX(edac_pci_ctls_mutex);
+ static LIST_HEAD(edac_pci_list);
+-static atomic_t pci_indexes = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
+ /*
+  * edac_pci_alloc_ctl_info
+@@ -224,7 +224,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
+  */
+ int edac_pci_alloc_index(void)
+ {
+-      return atomic_inc_return(&pci_indexes) - 1;
++      return atomic_inc_return_unchecked(&pci_indexes) - 1;
+ }
+ EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 6e3428b..9bdb207 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1;              /* log PCI parity errors */
+ static int edac_pci_log_npe = 1;      /* log PCI non-parity error errors */
+ static int edac_pci_poll_msec = 1000; /* one second workq period */
+-static atomic_t pci_parity_count = ATOMIC_INIT(0);
+-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
+ static struct kobject *edac_pci_top_main_kobj;
+ static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
+       void *value;
+        ssize_t(*show) (void *, char *);
+        ssize_t(*store) (void *, const char *, size_t);
+-};
++} __do_const;
+ /* Set of show/store abstract level functions for PCI Parity object */
+ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+@@ -564,7 +564,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                       edac_printk(KERN_CRIT, EDAC_PCI,
+                               "Signaled System Error on %s\n",
+                               pci_name(dev));
+-                      atomic_inc(&pci_nonparity_count);
++                      atomic_inc_unchecked(&pci_nonparity_count);
+               }
+               if (status & (PCI_STATUS_PARITY)) {
+@@ -572,7 +572,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                               "Master Data Parity Error on %s\n",
+                               pci_name(dev));
+-                      atomic_inc(&pci_parity_count);
++                      atomic_inc_unchecked(&pci_parity_count);
+               }
+               if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -580,7 +580,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                               "Detected Parity Error on %s\n",
+                               pci_name(dev));
+-                      atomic_inc(&pci_parity_count);
++                      atomic_inc_unchecked(&pci_parity_count);
+               }
+       }
+@@ -603,7 +603,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                               edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+                                       "Signaled System Error on %s\n",
+                                       pci_name(dev));
+-                              atomic_inc(&pci_nonparity_count);
++                              atomic_inc_unchecked(&pci_nonparity_count);
+                       }
+                       if (status & (PCI_STATUS_PARITY)) {
+@@ -611,7 +611,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                                       "Master Data Parity Error on "
+                                       "%s\n", pci_name(dev));
+-                              atomic_inc(&pci_parity_count);
++                              atomic_inc_unchecked(&pci_parity_count);
+                       }
+                       if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+                                       "Detected Parity Error on %s\n",
+                                       pci_name(dev));
+-                              atomic_inc(&pci_parity_count);
++                              atomic_inc_unchecked(&pci_parity_count);
+                       }
+               }
+       }
+@@ -657,7 +657,7 @@ void edac_pci_do_parity_check(void)
+       if (!check_pci_errors)
+               return;
+-      before_count = atomic_read(&pci_parity_count);
++      before_count = atomic_read_unchecked(&pci_parity_count);
+       /* scan all PCI devices looking for a Parity Error on devices and
+        * bridges.
+@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
+       /* Only if operator has selected panic on PCI Error */
+       if (edac_pci_get_panic_on_pe()) {
+               /* If the count is different 'after' from 'before' */
+-              if (before_count != atomic_read(&pci_parity_count))
++              if (before_count != atomic_read_unchecked(&pci_parity_count))
+                       panic("EDAC: PCI Parity Error");
+       }
+ }
+diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
+index c2359a1..8bd119d 100644
+--- a/drivers/edac/mce_amd.h
++++ b/drivers/edac/mce_amd.h
+@@ -74,7 +74,7 @@ struct amd_decoder_ops {
+       bool (*mc0_mce)(u16, u8);
+       bool (*mc1_mce)(u16, u8);
+       bool (*mc2_mce)(u16, u8);
+-};
++} __no_const;
+ void amd_report_gart_errors(bool);
+ void amd_register_ecc_decoder(void (*f)(int, struct mce *));
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 57ea7f4..af06b76 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
+                       const struct fw_card_driver *driver,
+                       struct device *device)
+ {
+-      static atomic_t index = ATOMIC_INIT(-1);
++      static atomic_unchecked_t index = ATOMIC_INIT(-1);
+-      card->index = atomic_inc_return(&index);
++      card->index = atomic_inc_return_unchecked(&index);
+       card->driver = driver;
+       card->device = device;
+       card->current_tlabel = 0;
+@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
+ void fw_core_remove_card(struct fw_card *card)
+ {
+-      struct fw_card_driver dummy_driver = dummy_driver_template;
++      fw_card_driver_no_const dummy_driver = dummy_driver_template;
+       card->driver->update_phy_reg(card, 4,
+                                    PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index aee149b..2a18960 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -970,7 +970,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
+ {
+       struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
+       struct fw_iso_context *context;
+-      fw_iso_callback_t cb;
++      void *cb;
+       int ret;
+       BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
+@@ -995,7 +995,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
+               break;
+       case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+-              cb = (fw_iso_callback_t)iso_mc_callback;
++              cb = iso_mc_callback;
+               break;
+       default:
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index f9e3aee..269dbdb 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
+ struct config_rom_attribute {
+       struct device_attribute attr;
+       u32 key;
+-};
++} __do_const;
+ static ssize_t show_immediate(struct device *dev,
+                             struct device_attribute *dattr, char *buf)
+diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
+index 38c0aa6..95466e4 100644
+--- a/drivers/firewire/core-iso.c
++++ b/drivers/firewire/core-iso.c
+@@ -162,7 +162,7 @@ size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
+ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+               int type, int channel, int speed, size_t header_size,
+-              fw_iso_callback_t callback, void *callback_data)
++              void *callback, void *callback_data)
+ {
+       struct fw_iso_context *ctx;
+diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
+index d6a09b9..18e90dd 100644
+--- a/drivers/firewire/core-transaction.c
++++ b/drivers/firewire/core-transaction.c
+@@ -38,6 +38,7 @@
+ #include <linux/timer.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
++#include <linux/sched.h>
+ #include <asm/byteorder.h>
+diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
+index e1480ff6..1a429bd 100644
+--- a/drivers/firewire/core.h
++++ b/drivers/firewire/core.h
+@@ -111,6 +111,7 @@ struct fw_card_driver {
+       int (*stop_iso)(struct fw_iso_context *ctx);
+ };
++typedef struct fw_card_driver __no_const fw_card_driver_no_const;
+ void fw_card_initialize(struct fw_card *card,
+               const struct fw_card_driver *driver, struct device *device);
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 8bf8926..55a4930 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
+                         be32_to_cpu(ohci->next_header));
+       }
++#ifndef CONFIG_GRKERNSEC
+       if (param_remote_dma) {
+               reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+               reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+       }
++#endif
+       spin_unlock_irq(&ohci->lock);
+@@ -2585,8 +2587,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
+       unsigned long flags;
+       int n, ret = 0;
++#ifndef CONFIG_GRKERNSEC
+       if (param_remote_dma)
+               return 0;
++#endif
+       /*
+        * FIXME:  Make sure this bitmask is cleared when we clear the busReset
+diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
+index 44c0139..5252697 100644
+--- a/drivers/firmware/dmi-id.c
++++ b/drivers/firmware/dmi-id.c
+@@ -16,7 +16,7 @@
+ struct dmi_device_attribute{
+       struct device_attribute dev_attr;
+       int field;
+-};
++} __do_const;
+ #define to_dmi_dev_attr(_dev_attr) \
+       container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+@@ -159,9 +159,14 @@ static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+       return 0;
+ }
++static void dmi_dev_release(struct device *dev)
++{
++      kfree(dev);
++}
++
+ static struct class dmi_class = {
+       .name = "dmi",
+-      .dev_release = (void(*)(struct device *)) kfree,
++      .dev_release = dmi_dev_release,
+       .dev_uevent = dmi_dev_uevent,
+ };
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 88bebe1..e599fad 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -712,14 +712,18 @@ static int __init dmi_init(void)
+       if (!dmi_table)
+               goto err_tables;
+-      bin_attr_smbios_entry_point.size = smbios_entry_point_size;
+-      bin_attr_smbios_entry_point.private = smbios_entry_point;
++      pax_open_kernel();
++      const_cast(bin_attr_smbios_entry_point.size) = smbios_entry_point_size;
++      const_cast(bin_attr_smbios_entry_point.private) = smbios_entry_point;
++      pax_close_kernel();
+       ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
+       if (ret)
+               goto err_unmap;
+-      bin_attr_DMI.size = dmi_len;
+-      bin_attr_DMI.private = dmi_table;
++      pax_open_kernel();
++      const_cast(bin_attr_DMI.size) = dmi_len;
++      const_cast(bin_attr_DMI.private) = dmi_table;
++      pax_close_kernel();
+       ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
+       if (!ret)
+               return 0;
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index d425374..1da1716 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
+  */
+ u64 cper_next_record_id(void)
+ {
+-      static atomic64_t seq;
++      static atomic64_unchecked_t seq;
+-      if (!atomic64_read(&seq))
+-              atomic64_set(&seq, ((u64)get_seconds()) << 32);
++      if (!atomic64_read_unchecked(&seq))
++              atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
+-      return atomic64_inc_return(&seq);
++      return atomic64_inc_return_unchecked(&seq);
+ }
+ EXPORT_SYMBOL_GPL(cper_next_record_id);
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 7dd2e2d..15990ac 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -180,15 +180,17 @@ static struct attribute_group efi_subsys_attr_group = {
+ };
+ static struct efivars generic_efivars;
+-static struct efivar_operations generic_ops;
++static efivar_operations_no_const generic_ops __read_only;
+ static int generic_ops_register(void)
+ {
+-      generic_ops.get_variable = efi.get_variable;
+-      generic_ops.set_variable = efi.set_variable;
+-      generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+-      generic_ops.get_next_variable = efi.get_next_variable;
+-      generic_ops.query_variable_store = efi_query_variable_store;
++      pax_open_kernel();
++      const_cast(generic_ops.get_variable) = efi.get_variable;
++      const_cast(generic_ops.set_variable) = efi.set_variable;
++      const_cast(generic_ops.set_variable_nonblocking) = efi.set_variable_nonblocking;
++      const_cast(generic_ops.get_next_variable) = efi.get_next_variable;
++      const_cast(generic_ops.query_variable_store) = efi_query_variable_store;
++      pax_close_kernel();
+       return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+ }
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 116b244..b16d9f2 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -583,7 +583,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ static int
+ create_efivars_bin_attributes(void)
+ {
+-      struct bin_attribute *attr;
++      bin_attribute_no_const *attr;
+       int error;
+       /* new_var */
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index c069451..fca41b6 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -20,6 +20,8 @@ KBUILD_CFLAGS                        := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+                                  $(call cc-option,-ffreestanding) \
+                                  $(call cc-option,-fno-stack-protector)
++KBUILD_CFLAGS                 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
++
+ GCOV_PROFILE                  := n
+ KASAN_SANITIZE                        := n
+ UBSAN_SANITIZE                        := n
+diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
+index 5c55227..97f4978 100644
+--- a/drivers/firmware/efi/runtime-map.c
++++ b/drivers/firmware/efi/runtime-map.c
+@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
+       kfree(entry);
+ }
+-static struct kobj_type __refdata map_ktype = {
++static const struct kobj_type __refconst map_ktype = {
+       .sysfs_ops      = &map_attr_ops,
+       .default_attrs  = def_attrs,
+       .release        = map_release,
+diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
+index f1ab05e..ab51228 100644
+--- a/drivers/firmware/google/gsmi.c
++++ b/drivers/firmware/google/gsmi.c
+@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
+       return local_hash_64(input, 32);
+ }
+-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
++static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
+       {
+               .ident = "Google Board",
+               .matches = {
+diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
+index 2f569aa..3af5497 100644
+--- a/drivers/firmware/google/memconsole.c
++++ b/drivers/firmware/google/memconsole.c
+@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
+       return false;
+ }
+-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
++static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
+       {
+               .ident = "Google Board",
+               .matches = {
+@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
+       if (!found_memconsole())
+               return -ENODEV;
+-      memconsole_bin_attr.size = memconsole_length;
++      pax_open_kernel();
++      const_cast(memconsole_bin_attr.size) = memconsole_length;
++      pax_close_kernel();
++
+       return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
+ }
+diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
+index 5de3ed2..d839c56 100644
+--- a/drivers/firmware/memmap.c
++++ b/drivers/firmware/memmap.c
+@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
+       kfree(entry);
+ }
+-static struct kobj_type __refdata memmap_ktype = {
++static const struct kobj_type __refconst memmap_ktype = {
+       .release        = release_firmware_map_entry,
+       .sysfs_ops      = &memmap_attr_ops,
+       .default_attrs  = def_attrs,
+diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
+index 8263429..d0ef61f 100644
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -59,7 +59,7 @@ bool psci_tos_resident_on(int cpu)
+       return cpu == resident_cpu;
+ }
+-struct psci_operations psci_ops;
++struct psci_operations psci_ops __read_only;
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+                               unsigned long, unsigned long);
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index dd262f0..2834a84 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -440,9 +440,9 @@ static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq)
+       return &gpio_unbanked.chip;
+ };
+-static struct irq_chip *keystone_gpio_get_irq_chip(unsigned int irq)
++static irq_chip_no_const *keystone_gpio_get_irq_chip(unsigned int irq)
+ {
+-      static struct irq_chip gpio_unbanked;
++      static irq_chip_no_const gpio_unbanked;
+       gpio_unbanked = *irq_get_chip(irq);
+       return &gpio_unbanked;
+@@ -472,7 +472,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
+       struct davinci_gpio_regs __iomem *g;
+       struct irq_domain       *irq_domain = NULL;
+       const struct of_device_id *match;
+-      struct irq_chip *irq_chip;
++      irq_chip_no_const *irq_chip;
+       gpio_get_irq_chip_cb_t gpio_get_irq_chip;
+       /*
+diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
+index 8d32ccc..2d2ca61 100644
+--- a/drivers/gpio/gpio-em.c
++++ b/drivers/gpio/gpio-em.c
+@@ -274,7 +274,7 @@ static int em_gio_probe(struct platform_device *pdev)
+       struct em_gio_priv *p;
+       struct resource *io[2], *irq[2];
+       struct gpio_chip *gpio_chip;
+-      struct irq_chip *irq_chip;
++      irq_chip_no_const *irq_chip;
+       const char *name = dev_name(&pdev->dev);
+       unsigned int ngpios;
+       int ret;
+diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
+index 4f6d643..eb4655c 100644
+--- a/drivers/gpio/gpio-ich.c
++++ b/drivers/gpio/gpio-ich.c
+@@ -95,7 +95,7 @@ struct ichx_desc {
+        * this option allows driver caching written output values
+        */
+       bool use_outlvl_cache;
+-};
++} __do_const;
+ static struct {
+       spinlock_t lock;
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 793518a..80ebce3 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -226,7 +226,7 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
+       return 0;
+ }
+-static struct irq_chip mpc8xxx_irq_chip = {
++static irq_chip_no_const mpc8xxx_irq_chip __read_only = {
+       .name           = "mpc8xxx-gpio",
+       .irq_unmask     = mpc8xxx_irq_unmask,
+       .irq_mask       = mpc8xxx_irq_mask,
+@@ -337,7 +337,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+        * It's assumed that only a single type of gpio controller is available
+        * on the current machine, so overwriting global data is fine.
+        */
+-      mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
++      pax_open_kernel();
++      const_cast(mpc8xxx_irq_chip.irq_set_type) = devtype->irq_set_type;
++      pax_close_kernel();
+       if (devtype->gpio_dir_out)
+               gc->direction_output = devtype->gpio_dir_out;
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index b98ede7..c83e860 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -1029,7 +1029,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
+               writel_relaxed(0, base + bank->regs->ctrl);
+ }
+-static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
++static int omap_gpio_chip_init(struct gpio_bank *bank, irq_chip_no_const *irqc)
+ {
+       static int gpio;
+       int irq_base = 0;
+@@ -1119,7 +1119,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
+       const struct omap_gpio_platform_data *pdata;
+       struct resource *res;
+       struct gpio_bank *bank;
+-      struct irq_chip *irqc;
++      irq_chip_no_const *irqc;
+       int ret;
+       match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
+index b96e0b4..c1e1b16 100644
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -391,7 +391,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
+       struct gpio_rcar_priv *p;
+       struct resource *io, *irq;
+       struct gpio_chip *gpio_chip;
+-      struct irq_chip *irq_chip;
++      irq_chip_no_const *irq_chip;
+       struct device *dev = &pdev->dev;
+       const char *name = dev_name(dev);
+       unsigned int npins;
+diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
+index ac8deb0..f3caa10 100644
+--- a/drivers/gpio/gpio-vr41xx.c
++++ b/drivers/gpio/gpio-vr41xx.c
+@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
+       printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+              maskl, pendl, maskh, pendh);
+-      atomic_inc(&irq_err_count);
++      atomic_inc_unchecked(&irq_err_count);
+       return -EINVAL;
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 53ff25a..6f88b8f 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1558,8 +1558,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
+       }
+       if (gpiochip->irqchip) {
+-              gpiochip->irqchip->irq_request_resources = NULL;
+-              gpiochip->irqchip->irq_release_resources = NULL;
++              pax_open_kernel();
++              const_cast(gpiochip->irqchip->irq_request_resources) = NULL;
++              const_cast(gpiochip->irqchip->irq_release_resources) = NULL;
++              pax_close_kernel();
+               gpiochip->irqchip = NULL;
+       }
+ }
+@@ -1636,8 +1638,10 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+        */
+       if (!irqchip->irq_request_resources &&
+           !irqchip->irq_release_resources) {
+-              irqchip->irq_request_resources = gpiochip_irq_reqres;
+-              irqchip->irq_release_resources = gpiochip_irq_relres;
++              pax_open_kernel();
++              const_cast(irqchip->irq_request_resources) = gpiochip_irq_reqres;
++              const_cast(irqchip->irq_release_resources) = gpiochip_irq_relres;
++              pax_close_kernel();
+       }
+       /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 700c56b..267fde4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1796,7 +1796,7 @@ int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+  * amdgpu smumgr functions
+  */
+ struct amdgpu_smumgr_funcs {
+-      int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
++      int (*check_fw_load_finish)(struct amdgpu_device *adev, enum AMDGPU_UCODE_ID fwtype);
+       int (*request_smu_load_fw)(struct amdgpu_device *adev);
+       int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 10b5ddf..ed2f78d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -519,7 +519,7 @@ static int amdgpu_atpx_init(void)
+  * look up whether we are the integrated or discrete GPU (all asics).
+  * Returns the client id.
+  */
+-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
++static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+ {
+       if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
+               return VGA_SWITCHEROO_IGD;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index bc0440f..ab93c5e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -1118,50 +1118,50 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
+ }
+ static const struct cgs_ops amdgpu_cgs_ops = {
+-      amdgpu_cgs_gpu_mem_info,
+-      amdgpu_cgs_gmap_kmem,
+-      amdgpu_cgs_gunmap_kmem,
+-      amdgpu_cgs_alloc_gpu_mem,
+-      amdgpu_cgs_free_gpu_mem,
+-      amdgpu_cgs_gmap_gpu_mem,
+-      amdgpu_cgs_gunmap_gpu_mem,
+-      amdgpu_cgs_kmap_gpu_mem,
+-      amdgpu_cgs_kunmap_gpu_mem,
+-      amdgpu_cgs_read_register,
+-      amdgpu_cgs_write_register,
+-      amdgpu_cgs_read_ind_register,
+-      amdgpu_cgs_write_ind_register,
+-      amdgpu_cgs_read_pci_config_byte,
+-      amdgpu_cgs_read_pci_config_word,
+-      amdgpu_cgs_read_pci_config_dword,
+-      amdgpu_cgs_write_pci_config_byte,
+-      amdgpu_cgs_write_pci_config_word,
+-      amdgpu_cgs_write_pci_config_dword,
+-      amdgpu_cgs_get_pci_resource,
+-      amdgpu_cgs_atom_get_data_table,
+-      amdgpu_cgs_atom_get_cmd_table_revs,
+-      amdgpu_cgs_atom_exec_cmd_table,
+-      amdgpu_cgs_create_pm_request,
+-      amdgpu_cgs_destroy_pm_request,
+-      amdgpu_cgs_set_pm_request,
+-      amdgpu_cgs_pm_request_clock,
+-      amdgpu_cgs_pm_request_engine,
+-      amdgpu_cgs_pm_query_clock_limits,
+-      amdgpu_cgs_set_camera_voltages,
+-      amdgpu_cgs_get_firmware_info,
+-      amdgpu_cgs_rel_firmware,
+-      amdgpu_cgs_set_powergating_state,
+-      amdgpu_cgs_set_clockgating_state,
+-      amdgpu_cgs_get_active_displays_info,
+-      amdgpu_cgs_notify_dpm_enabled,
+-      amdgpu_cgs_call_acpi_method,
+-      amdgpu_cgs_query_system_info,
++      .gpu_mem_info = amdgpu_cgs_gpu_mem_info,
++      .gmap_kmem = amdgpu_cgs_gmap_kmem,
++      .gunmap_kmem = amdgpu_cgs_gunmap_kmem,
++      .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
++      .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
++      .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
++      .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
++      .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
++      .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
++      .read_register = amdgpu_cgs_read_register,
++      .write_register = amdgpu_cgs_write_register,
++      .read_ind_register = amdgpu_cgs_read_ind_register,
++      .write_ind_register = amdgpu_cgs_write_ind_register,
++      .read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
++      .read_pci_config_word = amdgpu_cgs_read_pci_config_word,
++      .read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
++      .write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
++      .write_pci_config_word = amdgpu_cgs_write_pci_config_word,
++      .write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
++      .get_pci_resource = amdgpu_cgs_get_pci_resource,
++      .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
++      .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
++      .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
++      .create_pm_request = amdgpu_cgs_create_pm_request,
++      .destroy_pm_request = amdgpu_cgs_destroy_pm_request,
++      .set_pm_request = amdgpu_cgs_set_pm_request,
++      .pm_request_clock = amdgpu_cgs_pm_request_clock,
++      .pm_request_engine = amdgpu_cgs_pm_request_engine,
++      .pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
++      .set_camera_voltages = amdgpu_cgs_set_camera_voltages,
++      .get_firmware_info = amdgpu_cgs_get_firmware_info,
++      .rel_firmware = amdgpu_cgs_rel_firmware,
++      .set_powergating_state = amdgpu_cgs_set_powergating_state,
++      .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
++      .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
++      .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
++      .call_acpi_method = amdgpu_cgs_call_acpi_method,
++      .query_system_info = amdgpu_cgs_query_system_info
+ };
+ static const struct cgs_os_ops amdgpu_cgs_os_ops = {
+-      amdgpu_cgs_add_irq_source,
+-      amdgpu_cgs_irq_get,
+-      amdgpu_cgs_irq_put
++      .add_irq_source = amdgpu_cgs_add_irq_source,
++      .irq_get = amdgpu_cgs_irq_get,
++      .irq_put = amdgpu_cgs_irq_put
+ };
+ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index ff0b55a..c58880e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -701,7 +701,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+                                            struct drm_display_mode *mode)
+ {
+       struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+@@ -838,7 +838,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+                                           struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+@@ -1158,7 +1158,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
+               amdgpu_connector->use_digital = true;
+ }
+-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+                                           struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+@@ -1427,7 +1427,7 @@ out:
+       return ret;
+ }
+-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+                                          struct drm_display_mode *mode)
+ {
+       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 39c01b9..ced138c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1059,7 +1059,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
+       * locking inversion with the driver load path. And the access here is
+       * completely racy anyway. So don't bother with locking for now.
+       */
+-      return dev->open_count == 0;
++      return local_read(&dev->open_count) == 0;
+ }
+ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 9aa533c..2f39e50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -588,9 +588,6 @@ static struct drm_driver kms_driver = {
+       .patchlevel = KMS_DRIVER_PATCHLEVEL,
+ };
+-static struct drm_driver *driver;
+-static struct pci_driver *pdriver;
+-
+ static struct pci_driver amdgpu_kms_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+@@ -610,18 +607,20 @@ static int __init amdgpu_init(void)
+               return -EINVAL;
+       }
+       DRM_INFO("amdgpu kernel modesetting enabled.\n");
+-      driver = &kms_driver;
+-      pdriver = &amdgpu_kms_pci_driver;
+-      driver->num_ioctls = amdgpu_max_kms_ioctl;
++
++      pax_open_kernel();
++      const_cast(kms_driver.num_ioctls) = amdgpu_max_kms_ioctl;
++      pax_close_kernel();
++
+       amdgpu_register_atpx_handler();
+       /* let modprobe override vga console setting */
+-      return drm_pci_init(driver, pdriver);
++      return drm_pci_init(&kms_driver, &amdgpu_kms_pci_driver);
+ }
+ static void __exit amdgpu_exit(void)
+ {
+       amdgpu_amdkfd_fini();
+-      drm_pci_exit(driver, pdriver);
++      drm_pci_exit(&kms_driver, &amdgpu_kms_pci_driver);
+       amdgpu_unregister_atpx_handler();
+       amdgpu_sync_fini();
+       amdgpu_fence_slab_fini();
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+index 51321e1..3c80c0b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -27,6 +27,6 @@
+ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
+ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
++void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 80120fa..20c5411 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -202,7 +202,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+       unsigned i;
+       int r = 0;
+-      fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
++      fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
+                              GFP_KERNEL);
+       if (!fences)
+               return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+index b3e19ba..28942db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
++++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+@@ -519,7 +519,7 @@ static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
+       return 0;
+ }
+-static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
++static uint32_t fiji_smu_get_mask_for_fw_type(enum AMDGPU_UCODE_ID fw_type)
+ {
+       switch (fw_type) {
+               case AMDGPU_UCODE_ID_SDMA0:
+@@ -545,7 +545,7 @@ static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
+ }
+ static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
+-                                      uint32_t fw_type)
++                                      enum AMDGPU_UCODE_ID fw_type)
+ {
+       uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
+       int i;
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+index 2118399..8f80ddc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+@@ -424,7 +424,7 @@ static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
+       }
+ }
+-static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
++static uint32_t iceland_smu_get_mask_for_fw_type(enum AMDGPU_UCODE_ID fw_type)
+ {
+       switch (fw_type) {
+               case AMDGPU_UCODE_ID_SDMA0:
+@@ -562,7 +562,7 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
+ }
+ static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
+-                                          uint32_t fw_type)
++                                          enum AMDGPU_UCODE_ID fw_type)
+ {
+       uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
+       int i;
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+index 940de18..9ef25f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+@@ -521,7 +521,7 @@ static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
+       return 0;
+ }
+-static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
++static uint32_t tonga_smu_get_mask_for_fw_type(enum AMDGPU_UCODE_ID fw_type)
+ {
+       switch (fw_type) {
+               case AMDGPU_UCODE_ID_SDMA0:
+@@ -547,7 +547,7 @@ static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
+ }
+ static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
+-                                      uint32_t fw_type)
++                                      enum AMDGPU_UCODE_ID fw_type)
+ {
+       uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
+       int i;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index ee3e04e..65f7436 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -418,7 +418,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
+               (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+                  ? cache_policy_coherent : cache_policy_noncoherent;
+-      if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
++      if (!dev->dqm->ops->set_cache_memory_policy(dev->dqm,
+                               &pdd->qpd,
+                               default_policy,
+                               alternate_policy,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 3f95f7c..0a62dad 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -298,7 +298,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+               goto device_queue_manager_error;
+       }
+-      if (kfd->dqm->ops.start(kfd->dqm) != 0) {
++      if (kfd->dqm->ops->start(kfd->dqm) != 0) {
+               dev_err(kfd_device,
+                       "Error starting queuen manager for device (%x:%x)\n",
+                       kfd->pdev->vendor, kfd->pdev->device);
+@@ -354,7 +354,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
+       BUG_ON(kfd == NULL);
+       if (kfd->init_complete) {
+-              kfd->dqm->ops.stop(kfd->dqm);
++              kfd->dqm->ops->stop(kfd->dqm);
+               amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+               amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+               amd_iommu_free_device(kfd->pdev);
+@@ -377,7 +377,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
+               amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+                                               iommu_pasid_shutdown_callback);
+               amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
+-              kfd->dqm->ops.start(kfd->dqm);
++              kfd->dqm->ops->start(kfd->dqm);
+       }
+       return 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index f49c551..ad74c7e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -242,7 +242,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+       BUG_ON(!dqm || !q || !qpd);
+-      mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++      mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+       if (mqd == NULL)
+               return -ENOMEM;
+@@ -288,14 +288,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+       mutex_lock(&dqm->lock);
+       if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+-              mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++              mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+               if (mqd == NULL) {
+                       retval = -ENOMEM;
+                       goto out;
+               }
+               deallocate_hqd(dqm, q);
+       } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+-              mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++              mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+               if (mqd == NULL) {
+                       retval = -ENOMEM;
+                       goto out;
+@@ -347,7 +347,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+       BUG_ON(!dqm || !q || !q->mqd);
+       mutex_lock(&dqm->lock);
+-      mqd = dqm->ops.get_mqd_manager(dqm,
++      mqd = dqm->ops->get_mqd_manager(dqm,
+                       get_mqd_type_from_queue_type(q->properties.type));
+       if (mqd == NULL) {
+               mutex_unlock(&dqm->lock);
+@@ -414,7 +414,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
+       mutex_lock(&dqm->lock);
+       list_add(&n->list, &dqm->queues);
+-      retval = dqm->ops_asic_specific.register_process(dqm, qpd);
++      retval = dqm->ops_asic_specific->register_process(dqm, qpd);
+       dqm->processes_count++;
+@@ -502,7 +502,7 @@ int init_pipelines(struct device_queue_manager *dqm,
+       memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
+-      mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++      mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+       if (mqd == NULL) {
+               kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
+               return -ENOMEM;
+@@ -635,7 +635,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+       struct mqd_manager *mqd;
+       int retval;
+-      mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++      mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+       if (!mqd)
+               return -ENOMEM;
+@@ -650,7 +650,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+       pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
+       pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);
+-      dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
++      dqm->ops_asic_specific->init_sdma_vm(dqm, q, qpd);
+       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+                               &q->gart_mqd_addr, &q->properties);
+       if (retval != 0) {
+@@ -712,7 +712,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+       dqm->queue_count = dqm->processes_count = 0;
+       dqm->sdma_queue_count = 0;
+       dqm->active_runlist = false;
+-      retval = dqm->ops_asic_specific.initialize(dqm);
++      retval = dqm->ops_asic_specific->initialize(dqm);
+       if (retval != 0)
+               goto fail_init_pipelines;
+@@ -879,7 +879,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+       if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+               select_sdma_engine_id(q);
+-      mqd = dqm->ops.get_mqd_manager(dqm,
++      mqd = dqm->ops->get_mqd_manager(dqm,
+                       get_mqd_type_from_queue_type(q->properties.type));
+       if (mqd == NULL) {
+@@ -887,7 +887,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+               return -ENOMEM;
+       }
+-      dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
++      dqm->ops_asic_specific->init_sdma_vm(dqm, q, qpd);
+       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+                               &q->gart_mqd_addr, &q->properties);
+       if (retval != 0)
+@@ -1060,7 +1060,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+       }
+-      mqd = dqm->ops.get_mqd_manager(dqm,
++      mqd = dqm->ops->get_mqd_manager(dqm,
+                       get_mqd_type_from_queue_type(q->properties.type));
+       if (!mqd) {
+               retval = -ENOMEM;
+@@ -1149,7 +1149,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+               qpd->sh_mem_ape1_limit = limit >> 16;
+       }
+-      retval = dqm->ops_asic_specific.set_cache_memory_policy(
++      retval = dqm->ops_asic_specific->set_cache_memory_policy(
+                       dqm,
+                       qpd,
+                       default_policy,
+@@ -1172,6 +1172,36 @@ out:
+       return false;
+ }
++static const struct device_queue_manager_ops cp_dqm_ops = {
++      .create_queue = create_queue_cpsch,
++      .initialize = initialize_cpsch,
++      .start = start_cpsch,
++      .stop = stop_cpsch,
++      .destroy_queue = destroy_queue_cpsch,
++      .update_queue = update_queue,
++      .get_mqd_manager = get_mqd_manager_nocpsch,
++      .register_process = register_process_nocpsch,
++      .unregister_process = unregister_process_nocpsch,
++      .uninitialize = uninitialize_nocpsch,
++      .create_kernel_queue = create_kernel_queue_cpsch,
++      .destroy_kernel_queue = destroy_kernel_queue_cpsch,
++      .set_cache_memory_policy = set_cache_memory_policy,
++};
++
++static const struct device_queue_manager_ops no_cp_dqm_ops = {
++      .start = start_nocpsch,
++      .stop = stop_nocpsch,
++      .create_queue = create_queue_nocpsch,
++      .destroy_queue = destroy_queue_nocpsch,
++      .update_queue = update_queue,
++      .get_mqd_manager = get_mqd_manager_nocpsch,
++      .register_process = register_process_nocpsch,
++      .unregister_process = unregister_process_nocpsch,
++      .initialize = initialize_nocpsch,
++      .uninitialize = uninitialize_nocpsch,
++      .set_cache_memory_policy = set_cache_memory_policy,
++};
++
+ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ {
+       struct device_queue_manager *dqm;
+@@ -1189,33 +1219,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+       case KFD_SCHED_POLICY_HWS:
+       case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
+               /* initialize dqm for cp scheduling */
+-              dqm->ops.create_queue = create_queue_cpsch;
+-              dqm->ops.initialize = initialize_cpsch;
+-              dqm->ops.start = start_cpsch;
+-              dqm->ops.stop = stop_cpsch;
+-              dqm->ops.destroy_queue = destroy_queue_cpsch;
+-              dqm->ops.update_queue = update_queue;
+-              dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+-              dqm->ops.register_process = register_process_nocpsch;
+-              dqm->ops.unregister_process = unregister_process_nocpsch;
+-              dqm->ops.uninitialize = uninitialize_nocpsch;
+-              dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+-              dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+-              dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
++              dqm->ops = &cp_dqm_ops;
+               break;
+       case KFD_SCHED_POLICY_NO_HWS:
+               /* initialize dqm for no cp scheduling */
+-              dqm->ops.start = start_nocpsch;
+-              dqm->ops.stop = stop_nocpsch;
+-              dqm->ops.create_queue = create_queue_nocpsch;
+-              dqm->ops.destroy_queue = destroy_queue_nocpsch;
+-              dqm->ops.update_queue = update_queue;
+-              dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+-              dqm->ops.register_process = register_process_nocpsch;
+-              dqm->ops.unregister_process = unregister_process_nocpsch;
+-              dqm->ops.initialize = initialize_nocpsch;
+-              dqm->ops.uninitialize = uninitialize_nocpsch;
+-              dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
++              dqm->ops = &no_cp_dqm_ops;
+               break;
+       default:
+               BUG();
+@@ -1224,15 +1232,15 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+       switch (dev->device_info->asic_family) {
+       case CHIP_CARRIZO:
+-              device_queue_manager_init_vi(&dqm->ops_asic_specific);
++              device_queue_manager_init_vi(dqm);
+               break;
+       case CHIP_KAVERI:
+-              device_queue_manager_init_cik(&dqm->ops_asic_specific);
++              device_queue_manager_init_cik(dqm);
+               break;
+       }
+-      if (dqm->ops.initialize(dqm) != 0) {
++      if (dqm->ops->initialize(dqm) != 0) {
+               kfree(dqm);
+               return NULL;
+       }
+@@ -1244,6 +1252,6 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
+ {
+       BUG_ON(!dqm);
+-      dqm->ops.uninitialize(dqm);
++      dqm->ops->uninitialize(dqm);
+       kfree(dqm);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index a625b91..411e7d1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -154,8 +154,8 @@ struct device_queue_manager_asic_ops {
+  */
+ struct device_queue_manager {
+-      struct device_queue_manager_ops ops;
+-      struct device_queue_manager_asic_ops ops_asic_specific;
++      const struct device_queue_manager_ops *ops;
++      const struct device_queue_manager_asic_ops *ops_asic_specific;
+       struct mqd_manager      *mqds[KFD_MQD_TYPE_MAX];
+       struct packet_manager   packets;
+@@ -178,8 +178,8 @@ struct device_queue_manager {
+       bool                    active_runlist;
+ };
+-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
+-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
++void device_queue_manager_init_cik(struct device_queue_manager *dqm);
++void device_queue_manager_init_vi(struct device_queue_manager *dqm);
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+                                       struct qcm_process_device *qpd);
+ int init_pipelines(struct device_queue_manager *dqm,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+index c6f435a..34fb247 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+@@ -37,12 +37,16 @@ static int initialize_cpsch_cik(struct device_queue_manager *dqm);
+ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+                               struct qcm_process_device *qpd);
+-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops)
++static const struct device_queue_manager_asic_ops cik_dqm_asic_ops = {
++      .set_cache_memory_policy = set_cache_memory_policy_cik,
++      .register_process = register_process_cik,
++      .initialize = initialize_cpsch_cik,
++      .init_sdma_vm = init_sdma_vm,
++};
++
++void device_queue_manager_init_cik(struct device_queue_manager *dqm)
+ {
+-      ops->set_cache_memory_policy = set_cache_memory_policy_cik;
+-      ops->register_process = register_process_cik;
+-      ops->initialize = initialize_cpsch_cik;
+-      ops->init_sdma_vm = init_sdma_vm;
++      dqm->ops_asic_specific = &cik_dqm_asic_ops;
+ }
+ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index 7e9cae9..fbe7ba5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -39,12 +39,16 @@ static int initialize_cpsch_vi(struct device_queue_manager *dqm);
+ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+                               struct qcm_process_device *qpd);
+-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
++static const struct device_queue_manager_asic_ops vi_dqm_asic_ops = {
++      .set_cache_memory_policy = set_cache_memory_policy_vi,
++      .register_process = register_process_vi,
++      .initialize = initialize_cpsch_vi,
++      .init_sdma_vm = init_sdma_vm,
++};
++
++void device_queue_manager_init_vi(struct device_queue_manager *dqm)
+ {
+-      ops->set_cache_memory_policy = set_cache_memory_policy_vi;
+-      ops->register_process = register_process_vi;
+-      ops->initialize = initialize_cpsch_vi;
+-      ops->init_sdma_vm = init_sdma_vm;
++      dqm->ops_asic_specific = &vi_dqm_asic_ops;
+ }
+ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+index 7f134aa..cd34d4a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+@@ -50,8 +50,8 @@ static void interrupt_wq(struct work_struct *);
+ int kfd_interrupt_init(struct kfd_dev *kfd)
+ {
+-      void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
+-                                      kfd->device_info->ih_ring_entry_size,
++      void *interrupt_ring = kmalloc_array(kfd->device_info->ih_ring_entry_size,
++                                      KFD_INTERRUPT_RING_SIZE,
+                                       GFP_KERNEL);
+       if (!interrupt_ring)
+               return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 9beae87..1fe9326 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -56,7 +56,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+       switch (type) {
+       case KFD_QUEUE_TYPE_DIQ:
+       case KFD_QUEUE_TYPE_HIQ:
+-              kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
++              kq->mqd = dev->dqm->ops->get_mqd_manager(dev->dqm,
+                                               KFD_MQD_TYPE_HIQ);
+               break;
+       default:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+index 5940531..a75b0e5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+@@ -62,7 +62,7 @@ struct kernel_queue_ops {
+       void    (*submit_packet)(struct kernel_queue *kq);
+       void    (*rollback_packet)(struct kernel_queue *kq);
+-};
++} __no_const;
+ struct kernel_queue {
+       struct kernel_queue_ops ops;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 7b69070..d7bd78b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -194,7 +194,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+       if (list_empty(&pqm->queues)) {
+               pdd->qpd.pqm = pqm;
+-              dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
++              dev->dqm->ops->register_process(dev->dqm, &pdd->qpd);
+       }
+       pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+@@ -220,7 +220,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+                       goto err_create_queue;
+               pqn->q = q;
+               pqn->kq = NULL;
+-              retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
++              retval = dev->dqm->ops->create_queue(dev->dqm, q, &pdd->qpd,
+                                               &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
+               print_queue(q);
+@@ -234,7 +234,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+               kq->queue->properties.queue_id = *qid;
+               pqn->kq = kq;
+               pqn->q = NULL;
+-              retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
++              retval = dev->dqm->ops->create_kernel_queue(dev->dqm,
+                                                       kq, &pdd->qpd);
+               break;
+       default:
+@@ -265,7 +265,7 @@ err_allocate_pqn:
+       /* check if queues list is empty unregister process from device */
+       clear_bit(*qid, pqm->queue_slot_bitmap);
+       if (list_empty(&pqm->queues))
+-              dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
++              dev->dqm->ops->unregister_process(dev->dqm, &pdd->qpd);
+       return retval;
+ }
+@@ -306,13 +306,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+       if (pqn->kq) {
+               /* destroy kernel queue (DIQ) */
+               dqm = pqn->kq->dev->dqm;
+-              dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
++              dqm->ops->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+               kernel_queue_uninit(pqn->kq);
+       }
+       if (pqn->q) {
+               dqm = pqn->q->device->dqm;
+-              retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
++              retval = dqm->ops->destroy_queue(dqm, &pdd->qpd, pqn->q);
+               if (retval != 0)
+                       return retval;
+@@ -324,7 +324,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+       clear_bit(qid, pqm->queue_slot_bitmap);
+       if (list_empty(&pqm->queues))
+-              dqm->ops.unregister_process(dqm, &pdd->qpd);
++              dqm->ops->unregister_process(dqm, &pdd->qpd);
+       return retval;
+ }
+@@ -349,7 +349,7 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+       pqn->q->properties.queue_percent = p->queue_percent;
+       pqn->q->properties.priority = p->priority;
+-      retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
++      retval = pqn->q->device->dqm->ops->update_queue(pqn->q->device->dqm,
+                                                       pqn->q);
+       if (retval != 0)
+               return retval;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+index 2028980..484984b 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+@@ -240,10 +240,16 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+ static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
+       /*we don't need an exit table here, because there is only D3 cold on Kv*/
+-      { phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
+-      { phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
++      {
++        .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating,
++        .tableFunction = cz_tf_uvd_power_gating_initialize
++      },
++      {
++        .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating,
++        .tableFunction = cz_tf_vce_power_gating_initialize
++      },
+       /* to do { NULL, cz_tf_xdma_power_gating_enable }, */
+-      { NULL, NULL }
++      { }
+ };
+ const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+index 8cc0df9..365a42c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+@@ -916,13 +916,13 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
+ }
+ static const struct phm_master_table_item cz_set_power_state_list[] = {
+-      {NULL, cz_tf_update_sclk_limit},
+-      {NULL, cz_tf_set_deep_sleep_sclk_threshold},
+-      {NULL, cz_tf_set_watermark_threshold},
+-      {NULL, cz_tf_set_enabled_levels},
+-      {NULL, cz_tf_enable_nb_dpm},
+-      {NULL, cz_tf_update_low_mem_pstate},
+-      {NULL, NULL}
++      { .tableFunction = cz_tf_update_sclk_limit },
++      { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold },
++      { .tableFunction = cz_tf_set_watermark_threshold },
++      { .tableFunction = cz_tf_set_enabled_levels },
++      { .tableFunction = cz_tf_enable_nb_dpm },
++      { .tableFunction = cz_tf_update_low_mem_pstate },
++      { }
+ };
+ static const struct phm_master_table_header cz_set_power_state_master = {
+@@ -932,15 +932,15 @@ static const struct phm_master_table_header cz_set_power_state_master = {
+ };
+ static const struct phm_master_table_item cz_setup_asic_list[] = {
+-      {NULL, cz_tf_reset_active_process_mask},
+-      {NULL, cz_tf_upload_pptable_to_smu},
+-      {NULL, cz_tf_init_sclk_limit},
+-      {NULL, cz_tf_init_uvd_limit},
+-      {NULL, cz_tf_init_vce_limit},
+-      {NULL, cz_tf_init_acp_limit},
+-      {NULL, cz_tf_init_power_gate_state},
+-      {NULL, cz_tf_init_sclk_threshold},
+-      {NULL, NULL}
++      { .tableFunction = cz_tf_reset_active_process_mask },
++      { .tableFunction = cz_tf_upload_pptable_to_smu },
++      { .tableFunction = cz_tf_init_sclk_limit },
++      { .tableFunction = cz_tf_init_uvd_limit },
++      { .tableFunction = cz_tf_init_vce_limit },
++      { .tableFunction = cz_tf_init_acp_limit },
++      { .tableFunction = cz_tf_init_power_gate_state },
++      { .tableFunction = cz_tf_init_sclk_threshold },
++      { }
+ };
+ static const struct phm_master_table_header cz_setup_asic_master = {
+@@ -985,10 +985,10 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
+ }
+ static const struct phm_master_table_item cz_power_down_asic_list[] = {
+-      {NULL, cz_tf_power_up_display_clock_sys_pll},
+-      {NULL, cz_tf_clear_nb_dpm_flag},
+-      {NULL, cz_tf_reset_cc6_data},
+-      {NULL, NULL}
++      { .tableFunction = cz_tf_power_up_display_clock_sys_pll },
++      { .tableFunction = cz_tf_clear_nb_dpm_flag },
++      { .tableFunction = cz_tf_reset_cc6_data },
++      { }
+ };
+ static const struct phm_master_table_header cz_power_down_asic_master = {
+@@ -1096,8 +1096,8 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
+ }
+ static const struct phm_master_table_item cz_disable_dpm_list[] = {
+-      { NULL, cz_tf_check_for_dpm_enabled},
+-      {NULL, NULL},
++      { .tableFunction = cz_tf_check_for_dpm_enabled },
++      { },
+ };
+@@ -1108,13 +1108,13 @@ static const struct phm_master_table_header cz_disable_dpm_master = {
+ };
+ static const struct phm_master_table_item cz_enable_dpm_list[] = {
+-      { NULL, cz_tf_check_for_dpm_disabled },
+-      { NULL, cz_tf_program_voting_clients },
+-      { NULL, cz_tf_start_dpm},
+-      { NULL, cz_tf_program_bootup_state},
+-      { NULL, cz_tf_enable_didt },
+-      { NULL, cz_tf_reset_acp_boot_level },
+-      {NULL, NULL},
++      { .tableFunction = cz_tf_check_for_dpm_disabled },
++      { .tableFunction = cz_tf_program_voting_clients },
++      { .tableFunction = cz_tf_start_dpm },
++      { .tableFunction = cz_tf_program_bootup_state },
++      { .tableFunction = cz_tf_enable_didt },
++      { .tableFunction = cz_tf_reset_acp_boot_level },
++      { },
+ };
+ static const struct phm_master_table_header cz_enable_dpm_master = {
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+index 92976b6..7d1f7f6 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+@@ -617,17 +617,17 @@ static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+ static const struct phm_master_table_item
+ fiji_thermal_start_thermal_controller_master_list[] = {
+-      {NULL, tf_fiji_thermal_initialize},
+-      {NULL, tf_fiji_thermal_set_temperature_range},
+-      {NULL, tf_fiji_thermal_enable_alert},
++      { .tableFunction = tf_fiji_thermal_initialize},
++      { .tableFunction = tf_fiji_thermal_set_temperature_range},
++      { .tableFunction = tf_fiji_thermal_enable_alert},
+ /* We should restrict performance levels to low before we halt the SMC.
+  * On the other hand we are still in boot state when we do this
+  * so it would be pointless.
+  * If this assumption changes we have to revisit this table.
+  */
+-      {NULL, tf_fiji_thermal_setup_fan_table},
+-      {NULL, tf_fiji_thermal_start_smc_fan_control},
+-      {NULL, NULL}
++      { .tableFunction = tf_fiji_thermal_setup_fan_table},
++      { .tableFunction = tf_fiji_thermal_start_smc_fan_control},
++      { }
+ };
+ static const struct phm_master_table_header
+@@ -639,10 +639,10 @@ fiji_thermal_start_thermal_controller_master = {
+ static const struct phm_master_table_item
+ fiji_thermal_set_temperature_range_master_list[] = {
+-      {NULL, tf_fiji_thermal_disable_alert},
+-      {NULL, tf_fiji_thermal_set_temperature_range},
+-      {NULL, tf_fiji_thermal_enable_alert},
+-      {NULL, NULL}
++      { .tableFunction = tf_fiji_thermal_disable_alert},
++      { .tableFunction = tf_fiji_thermal_set_temperature_range},
++      { .tableFunction = tf_fiji_thermal_enable_alert},
++      { }
+ };
+ static const struct phm_master_table_header
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+index b206632..eeb4724 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+@@ -645,18 +645,18 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+ static const struct phm_master_table_item
+ polaris10_thermal_start_thermal_controller_master_list[] = {
+-      {NULL, tf_polaris10_thermal_initialize},
+-      {NULL, tf_polaris10_thermal_set_temperature_range},
+-      {NULL, tf_polaris10_thermal_enable_alert},
+-      {NULL, tf_polaris10_thermal_avfs_enable},
++      { .tableFunction = tf_polaris10_thermal_initialize },
++      { .tableFunction = tf_polaris10_thermal_set_temperature_range },
++      { .tableFunction = tf_polaris10_thermal_enable_alert },
++      { .tableFunction = tf_polaris10_thermal_avfs_enable },
+ /* We should restrict performance levels to low before we halt the SMC.
+  * On the other hand we are still in boot state when we do this
+  * so it would be pointless.
+  * If this assumption changes we have to revisit this table.
+  */
+-      {NULL, tf_polaris10_thermal_setup_fan_table},
+-      {NULL, tf_polaris10_thermal_start_smc_fan_control},
+-      {NULL, NULL}
++      { .tableFunction = tf_polaris10_thermal_setup_fan_table },
++      { .tableFunction = tf_polaris10_thermal_start_smc_fan_control },
++      { }
+ };
+ static const struct phm_master_table_header
+@@ -668,10 +668,10 @@ polaris10_thermal_start_thermal_controller_master = {
+ static const struct phm_master_table_item
+ polaris10_thermal_set_temperature_range_master_list[] = {
+-      {NULL, tf_polaris10_thermal_disable_alert},
+-      {NULL, tf_polaris10_thermal_set_temperature_range},
+-      {NULL, tf_polaris10_thermal_enable_alert},
+-      {NULL, NULL}
++      { .tableFunction = tf_polaris10_thermal_disable_alert },
++      { .tableFunction = tf_polaris10_thermal_set_temperature_range },
++      { .tableFunction = tf_polaris10_thermal_enable_alert },
++      { }
+ };
+ static const struct phm_master_table_header
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+index 47ef1ca..d352d38 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+@@ -526,16 +526,16 @@ static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, v
+ }
+ static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
+-      { NULL, tf_tonga_thermal_initialize },
+-      { NULL, tf_tonga_thermal_set_temperature_range },
+-      { NULL, tf_tonga_thermal_enable_alert },
++      { .tableFunction = tf_tonga_thermal_initialize },
++      { .tableFunction = tf_tonga_thermal_set_temperature_range },
++      { .tableFunction = tf_tonga_thermal_enable_alert },
+ /* We should restrict performance levels to low before we halt the SMC.
+  * On the other hand we are still in boot state when we do this so it would be pointless.
+  * If this assumption changes we have to revisit this table.
+  */
+-      { NULL, tf_tonga_thermal_setup_fan_table},
+-      { NULL, tf_tonga_thermal_start_smc_fan_control},
+-      { NULL, NULL }
++      { .tableFunction = tf_tonga_thermal_setup_fan_table},
++      { .tableFunction = tf_tonga_thermal_start_smc_fan_control},
++      { }
+ };
+ static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
+@@ -545,10 +545,10 @@ static const struct phm_master_table_header tonga_thermal_start_thermal_controll
+ };
+ static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
+-      { NULL, tf_tonga_thermal_disable_alert},
+-      { NULL, tf_tonga_thermal_set_temperature_range},
+-      { NULL, tf_tonga_thermal_enable_alert},
+-      { NULL, NULL }
++      { .tableFunction = tf_tonga_thermal_disable_alert},
++      { .tableFunction = tf_tonga_thermal_set_temperature_range},
++      { .tableFunction = tf_tonga_thermal_enable_alert},
++      { }
+ };
+ static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index 963a24d..e5d0a91 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -140,7 +140,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+       if (r)
+               return r;
+-      atomic_set(&entity->fence_seq, 0);
++      atomic_set_unchecked(&entity->fence_seq, 0);
+       entity->fence_context = fence_context_alloc(2);
+       return 0;
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index 7cbbbfb..a1e3949 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -47,7 +47,7 @@ struct amd_sched_entity {
+       spinlock_t                      queue_lock;
+       struct kfifo                    job_queue;
+-      atomic_t                        fence_seq;
++      atomic_unchecked_t              fence_seq;
+       uint64_t                        fence_context;
+       struct fence                    *dependency;
+diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+index 6b63bea..d7aa8a9 100644
+--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+@@ -41,7 +41,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+       fence->sched = entity->sched;
+       spin_lock_init(&fence->lock);
+-      seq = atomic_inc_return(&entity->fence_seq);
++      seq = atomic_inc_return_unchecked(&entity->fence_seq);
+       fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+                  &fence->lock, entity->fence_context, seq);
+       fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
+index f5ebdd6..135c95c 100644
+--- a/drivers/gpu/drm/armada/armada_drv.c
++++ b/drivers/gpu/drm/armada/armada_drv.c
+@@ -213,6 +213,7 @@ static struct drm_driver armada_drm_driver = {
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET |
+                                 DRIVER_HAVE_IRQ | DRIVER_PRIME,
+       .ioctls                 = armada_ioctls,
++      .num_ioctls             = ARRAY_SIZE(armada_ioctls),
+       .fops                   = &armada_drm_fops,
+ };
+@@ -333,8 +334,6 @@ static int __init armada_drm_init(void)
+ {
+       int ret;
+-      armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
+-
+       ret = platform_driver_register(&armada_lcd_platform_driver);
+       if (ret)
+               return ret;
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 5957c3e..970039e 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -775,7 +775,7 @@ static int ast_get_modes(struct drm_connector *connector)
+       return 0;
+ }
+-static int ast_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
+                         struct drm_display_mode *mode)
+ {
+       struct ast_private *ast = connector->dev->dev_private;
+diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
+index 207a2cb..666b75a 100644
+--- a/drivers/gpu/drm/bochs/bochs_kms.c
++++ b/drivers/gpu/drm/bochs/bochs_kms.c
+@@ -187,7 +187,7 @@ int bochs_connector_get_modes(struct drm_connector *connector)
+       return count;
+ }
+-static int bochs_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
+                                     struct drm_display_mode *mode)
+ {
+       struct bochs_device *bochs =
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index a09825d..6faa4d7 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1102,7 +1102,7 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+       return true;
+ }
+-static int tc_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
+                                  struct drm_display_mode *mode)
+ {
+       /* Accept any mode */
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index ddebe54..68a674d 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -4364,7 +4364,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                                       goto done;
+                               }
+-                              if (copy_to_user(&enum_ptr[copied].name,
++                              if (copy_to_user(enum_ptr[copied].name,
+                                                &prop_enum->name, DRM_PROP_NAME_LEN)) {
+                                       ret = -EFAULT;
+                                       goto done;
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index be27ed3..72aa552 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -368,7 +368,7 @@ void drm_unplug_dev(struct drm_device *dev)
+       drm_device_set_unplugged(dev);
+-      if (dev->open_count == 0) {
++      if (local_read(&dev->open_count) == 0) {
+               drm_put_dev(dev);
+       }
+       mutex_unlock(&drm_global_mutex);
+diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
+index 1fd6eac..e4206c9 100644
+--- a/drivers/gpu/drm/drm_fb_cma_helper.c
++++ b/drivers/gpu/drm/drm_fb_cma_helper.c
+@@ -335,7 +335,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
+                                   struct drm_gem_cma_object *cma_obj)
+ {
+       struct fb_deferred_io *fbdefio;
+-      struct fb_ops *fbops;
++      fb_ops_no_const *fbops;
+       /*
+        * Per device structures are needed because:
+@@ -362,7 +362,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
+       fbdefio->deferred_io = drm_fb_helper_deferred_io;
+       fbi->fbdefio = fbdefio;
+       fb_deferred_io_init(fbi);
+-      fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
++      fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 323c238..0eaad21 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -132,7 +132,7 @@ int drm_open(struct inode *inode, struct file *filp)
+               return PTR_ERR(minor);
+       dev = minor->dev;
+-      if (!dev->open_count++)
++      if (local_inc_return(&dev->open_count) == 1)
+               need_setup = 1;
+       /* share address_space across all char-devs of a single device */
+@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
+       return 0;
+ err_undo:
+-      dev->open_count--;
++      local_dec(&dev->open_count);
+       drm_minor_release(minor);
+       return retcode;
+ }
+@@ -371,7 +371,7 @@ int drm_release(struct inode *inode, struct file *filp)
+       mutex_lock(&drm_global_mutex);
+-      DRM_DEBUG("open_count = %d\n", dev->open_count);
++      DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
+       mutex_lock(&dev->filelist_mutex);
+       list_del(&file_priv->lhead);
+@@ -384,10 +384,10 @@ int drm_release(struct inode *inode, struct file *filp)
+        * Begin inline drm_release
+        */
+-      DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
++      DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->kdev->devt),
+-                dev->open_count);
++                local_read(&dev->open_count));
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_legacy_lock_release(dev, filp);
+@@ -425,7 +425,7 @@ int drm_release(struct inode *inode, struct file *filp)
+        * End inline drm_release
+        */
+-      if (!--dev->open_count) {
++      if (local_dec_and_test(&dev->open_count)) {
+               drm_lastclose(dev);
+               if (drm_device_is_unplugged(dev))
+                       drm_put_dev(dev);
+@@ -564,6 +564,11 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+ }
+ EXPORT_SYMBOL(drm_poll);
++static void drm_pending_event_destroy(struct drm_pending_event *event)
++{
++      kfree(event);
++}
++
+ /**
+  * drm_event_reserve_init_locked - init a DRM event and reserve space for it
+  * @dev: DRM device
+diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
+index 3d2e91c..d31c4c9 100644
+--- a/drivers/gpu/drm/drm_global.c
++++ b/drivers/gpu/drm/drm_global.c
+@@ -36,7 +36,7 @@
+ struct drm_global_item {
+       struct mutex mutex;
+       void *object;
+-      int refcount;
++      atomic_t refcount;
+ };
+ static struct drm_global_item glob[DRM_GLOBAL_NUM];
+@@ -49,7 +49,7 @@ void drm_global_init(void)
+               struct drm_global_item *item = &glob[i];
+               mutex_init(&item->mutex);
+               item->object = NULL;
+-              item->refcount = 0;
++              atomic_set(&item->refcount, 0);
+       }
+ }
+@@ -59,7 +59,7 @@ void drm_global_release(void)
+       for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+               struct drm_global_item *item = &glob[i];
+               BUG_ON(item->object != NULL);
+-              BUG_ON(item->refcount != 0);
++              BUG_ON(atomic_read(&item->refcount) != 0);
+       }
+ }
+@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
+       struct drm_global_item *item = &glob[ref->global_type];
+       mutex_lock(&item->mutex);
+-      if (item->refcount == 0) {
++      if (atomic_read(&item->refcount) == 0) {
+               item->object = kzalloc(ref->size, GFP_KERNEL);
+               if (unlikely(item->object == NULL)) {
+                       ret = -ENOMEM;
+@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
+                       goto out_err;
+       }
+-      ++item->refcount;
++      atomic_inc(&item->refcount);
+       ref->object = item->object;
+       mutex_unlock(&item->mutex);
+       return 0;
+@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
+       struct drm_global_item *item = &glob[ref->global_type];
+       mutex_lock(&item->mutex);
+-      BUG_ON(item->refcount == 0);
++      BUG_ON(atomic_read(&item->refcount) == 0);
+       BUG_ON(ref->object != item->object);
+-      if (--item->refcount == 0) {
++      if (atomic_dec_and_test(&item->refcount)) {
+               ref->release(ref);
+               item->object = NULL;
+       }
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index a628975..98c84f7 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -458,7 +458,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+       request = compat_alloc_user_space(nbytes);
+       if (!request)
+               return -EFAULT;
+-      list = (struct drm_buf_desc *) (request + 1);
++      list = (struct drm_buf_desc __user *) (request + 1);
+       if (__put_user(count, &request->count)
+           || __put_user(list, &request->list))
+@@ -519,7 +519,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+       request = compat_alloc_user_space(nbytes);
+       if (!request)
+               return -EFAULT;
+-      list = (struct drm_buf_pub *) (request + 1);
++      list = (struct drm_buf_pub __user *) (request + 1);
+       if (__put_user(count, &request->count)
+           || __put_user(list, &request->list))
+@@ -1074,7 +1074,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+ }
+ #endif
+-static drm_ioctl_compat_t *drm_compat_ioctls[] = {
++static drm_ioctl_compat_t drm_compat_ioctls[] = {
+       [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+@@ -1123,7 +1123,6 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn;
+       int ret;
+       /* Assume that ioctls without an explicit compat routine will just
+@@ -1133,10 +1132,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+       if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+               return drm_ioctl(filp, cmd, arg);
+-      fn = drm_compat_ioctls[nr];
+-
+-      if (fn != NULL)
+-              ret = (*fn) (filp, cmd, arg);
++      if (drm_compat_ioctls[nr] != NULL)
++              ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index 33af4a5..ceb09f2 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -643,7 +643,7 @@ long drm_ioctl(struct file *filp,
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev;
+       const struct drm_ioctl_desc *ioctl = NULL;
+-      drm_ioctl_t *func;
++      drm_ioctl_no_const_t func;
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+       int retcode = -EINVAL;
+       char stack_kdata[128];
+diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
+index b2f8f10..39eb872 100644
+--- a/drivers/gpu/drm/drm_pci.c
++++ b/drivers/gpu/drm/drm_pci.c
+@@ -264,7 +264,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+       /* No locking needed since shadow-attach is single-threaded since it may
+        * only be called from the per-driver module init hook. */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+-              list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
++              pax_list_add_tail(&dev->legacy_dev_list, (struct list_head *)&driver->legacy_dev_list);
+       return 0;
+@@ -303,7 +303,10 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+               return pci_register_driver(pdriver);
+       /* If not using KMS, fall back to stealth mode manual scanning. */
+-      INIT_LIST_HEAD(&driver->legacy_dev_list);
++      pax_open_kernel();
++      INIT_LIST_HEAD((struct list_head *)&driver->legacy_dev_list);
++      pax_close_kernel();
++
+       for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+               pid = &pdriver->id_table[i];
+@@ -426,7 +429,7 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+       } else {
+               list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+                                        legacy_dev_list) {
+-                      list_del(&dev->legacy_dev_list);
++                      pax_list_del(&dev->legacy_dev_list);
+                       drm_put_dev(dev);
+               }
+       }
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+index 877d2ef..7b2d94d 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+@@ -548,6 +548,11 @@ static int compare_dev(struct device *dev, void *data)
+       return dev == (struct device *)data;
+ }
++static int platform_bus_type_match(struct device *dev, void *data)
++{
++      return platform_bus_type.match(dev, data);
++}
++
+ static struct component_match *exynos_drm_match_add(struct device *dev)
+ {
+       struct component_match *match = NULL;
+@@ -562,7 +567,7 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
+               while ((d = bus_find_device(&platform_bus_type, p,
+                                           &info->driver->driver,
+-                                          (void *)platform_bus_type.match))) {
++                                          platform_bus_type_match))) {
+                       put_device(p);
+                       component_match_add(dev, &match, compare_dev, d);
+                       p = d;
+@@ -593,7 +598,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
+       struct component_match *match;
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+-      exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
+       match = exynos_drm_match_add(&pdev->dev);
+       if (IS_ERR(match))
+@@ -631,7 +635,7 @@ static struct device *exynos_drm_get_dma_device(void)
+               while ((dev = bus_find_device(&platform_bus_type, NULL,
+                                           &info->driver->driver,
+-                                          (void *)platform_bus_type.match))) {
++                                          platform_bus_type_match))) {
+                       put_device(dev);
+                       return dev;
+               }
+@@ -652,7 +656,7 @@ static void exynos_drm_unregister_devices(void)
+               while ((dev = bus_find_device(&platform_bus_type, NULL,
+                                           &info->driver->driver,
+-                                          (void *)platform_bus_type.match))) {
++                                          platform_bus_type_match))) {
+                       put_device(dev);
+                       platform_device_unregister(to_platform_device(dev));
+               }
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+index 6eca8bb..d607c01 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+@@ -1055,6 +1055,11 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
+       return 0;
+ }
++static void exynos_g2d_dmabuf_destroy(struct drm_pending_event *event)
++{
++      kfree(event);
++}
++
+ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+                                struct drm_file *file)
+ {
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index 2275efe..c91e144 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -919,7 +919,7 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
+       return -EINVAL;
+ }
+-static int hdmi_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
+                       struct drm_display_mode *mode)
+ {
+       struct hdmi_context *hdata = connector_to_hdmi(connector);
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
+index b837e7a..cb5a14b 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
+@@ -64,7 +64,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+       REG_WRITE(reg, temp);
+ }
+-static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+ {
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
+index c52f9ad..486d203 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
+@@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
+       msleep(intel_dp->backlight_off_delay);
+ }
+-static int
++static enum drm_mode_status
+ cdv_intel_dp_mode_valid(struct drm_connector *connector,
+                   struct drm_display_mode *mode)
+ {
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+index 563f193..f087899 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+@@ -223,7 +223,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int cdv_hdmi_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       if (mode->clock > 165000)
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+index 38dc890..c87ef7b 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+@@ -244,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
+ {
+ }
+-static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+                             struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+index a05c0206..01bfdad 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+@@ -120,9 +120,14 @@ static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
+       u32 pipeconf_reg = PIPEACONF;
+       u32 dspcntr_reg = DSPACNTR;
+-      u32 dspcntr = dev_priv->dspcntr[pipe];
++      u32 dspcntr;
+       u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
++      if (pipe == -1)
++              return;
++
++      dspcntr = dev_priv->dspcntr[pipe];
++
+       if (pipe) {
+               pipeconf_reg = PIPECCONF;
+               dspcntr_reg = DSPCCNTR;
+@@ -645,6 +650,9 @@ static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+       if (!gma_power_begin(dev, true))
+               return;
++      if (pipe == -1)
++              return;
++
+       if (on) {
+               if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+                       mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+index 907cb51..ae6f60c 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+@@ -351,7 +351,7 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
+       return 0;
+ }
+-static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+                                               struct drm_display_mode *mode)
+ {
+       struct mdfld_dsi_connector *dsi_connector =
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+index 8b2eb32..78566a8 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+@@ -509,7 +509,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+       HDMI_WRITE(HDMI_VIDEO_REG, temp);
+ }
+-static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+ {
+       if (mode->clock > 165000)
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index 50eb944f..93904f6 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -373,7 +373,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
+       drm_irq_install(dev, dev->pdev->irq);
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+-      dev->driver->get_vblank_counter = psb_get_vblank_counter;
+       psb_modeset_init(dev);
+       psb_fbdev_init(dev);
+diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
+index 2a3b7c6..fbd3fa3 100644
+--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
++++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
+@@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev,
+ extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                     const struct drm_display_mode *mode,
+                                     struct drm_display_mode *adjusted_mode);
+-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
+                                    struct drm_display_mode *mode);
+ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+                                       struct drm_property *property,
+diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+index e55733c..524a9fd 100644
+--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
+       }
+ }
+-int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+index e787d37..91622fd 100644
+--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
++++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+@@ -1158,7 +1158,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+       return;
+ }
+-static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
+index f4315bc..2048cc2 100644
+--- a/drivers/gpu/drm/i2c/tda998x_drv.c
++++ b/drivers/gpu/drm/i2c/tda998x_drv.c
+@@ -856,7 +856,7 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+       priv->dpms = mode;
+ }
+-static int tda998x_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
+                                       struct drm_display_mode *mode)
+ {
+       /* TDA19988 dotclock can go up to 165MHz */
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index d918567..6cfd904 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -1250,7 +1250,7 @@ const struct drm_ioctl_desc i810_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ };
+-int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
++const int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
+ /**
+  * Determine if the device really is AGP or not.
+diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
+index 44f4a13..af9f6f5 100644
+--- a/drivers/gpu/drm/i810/i810_drv.c
++++ b/drivers/gpu/drm/i810/i810_drv.c
+@@ -87,7 +87,11 @@ static int __init i810_init(void)
+               pr_err("drm/i810 does not support SMP\n");
+               return -EINVAL;
+       }
+-      driver.num_ioctls = i810_max_ioctl;
++
++      pax_open_kernel();
++      const_cast(driver.num_ioctls) = i810_max_ioctl;
++      pax_close_kernel();
++
+       return drm_pci_init(&driver, &i810_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
+index 93ec5dc..204ec92 100644
+--- a/drivers/gpu/drm/i810/i810_drv.h
++++ b/drivers/gpu/drm/i810/i810_drv.h
+@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
+       int page_flipping;
+       wait_queue_head_t irq_queue;
+-      atomic_t irq_received;
+-      atomic_t irq_emitted;
++      atomic_unchecked_t irq_received;
++      atomic_unchecked_t irq_emitted;
+       int front_offset;
+ } drm_i810_private_t;
+@@ -128,7 +128,7 @@ extern int i810_driver_device_is_agp(struct drm_device *dev);
+ extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ extern const struct drm_ioctl_desc i810_ioctls[];
+-extern int i810_max_ioctl;
++extern const int i810_max_ioctl;
+ #define I810_BASE(reg)                ((unsigned long) \
+                               dev_priv->mmio_map->handle)
+diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
+index 5e6a301..b6e143e 100644
+--- a/drivers/gpu/drm/i915/dvo.h
++++ b/drivers/gpu/drm/i915/dvo.h
+@@ -74,7 +74,7 @@ struct intel_dvo_dev_ops {
+        *
+        * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+        */
+-      int (*mode_valid)(struct intel_dvo_device *dvo,
++      enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
+                         struct drm_display_mode *mode);
+       /*
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 5de36d8..7d7899c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -50,7 +50,7 @@
+ #include "i915_vgpu.h"
+ #include "intel_drv.h"
+-static struct drm_driver driver;
++static drm_driver_no_const driver;
+ static unsigned int i915_load_fail_count;
+@@ -557,7 +557,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+        * locking inversion with the driver load path. And the access here is
+        * completely racy anyway. So don't bother with locking for now.
+        */
+-      return dev->open_count == 0;
++      return local_read(&dev->open_count) == 0;
+ }
+ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+@@ -1224,8 +1224,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+       struct drm_i915_private *dev_priv;
+       int ret;
+-      if (i915.nuclear_pageflip)
++      if (i915.nuclear_pageflip) {
++              pax_open_kernel();
+               driver.driver_features |= DRIVER_ATOMIC;
++              pax_close_kernel();
++      }
+       ret = -ENOMEM;
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+@@ -2610,7 +2613,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+ };
+-static struct drm_driver driver = {
++static drm_driver_no_const driver __read_only = {
+       /* Don't use MTRRs here; the Xserver or userspace app should
+        * deal with them for Intel hardware.
+        */
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index b35e5b6..998ddfc 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -993,12 +993,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+ static int
+ validate_exec_list(struct drm_device *dev,
+                  struct drm_i915_gem_exec_object2 *exec,
+-                 int count)
++                 unsigned int count)
+ {
+       unsigned relocs_total = 0;
+       unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+       unsigned invalid_flags;
+-      int i;
++      unsigned int i;
+       invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+       if (USES_FULL_PPGTT(dev))
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index f38ceff..3f18728 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -3268,8 +3268,8 @@ int i915_ggtt_init_hw(struct drm_device *dev)
+       /* GMADR is the PCI mmio aperture into the global GTT. */
+       DRM_INFO("Memory usable by graphics device = %lluM\n",
+                ggtt->base.total >> 20);
+-      DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+-      DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
++      DRM_DEBUG_DRIVER("GMADR size = %lluM\n", ggtt->mappable_end >> 20);
++      DRM_DEBUG_DRIVER("GTT stolen size = %lluM\n", ggtt->stolen_size >> 20);
+ #ifdef CONFIG_INTEL_IOMMU
+       if (intel_iommu_gfx_mapped)
+               DRM_INFO("VT-d active for gfx access\n");
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
+index aa5f31d..9df8e4d 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
+@@ -350,14 +350,14 @@ struct i915_address_space {
+ struct i915_ggtt {
+       struct i915_address_space base;
+-      size_t stolen_size;             /* Total size of stolen memory */
++      u64 stolen_size;                /* Total size of stolen memory */
+       size_t stolen_usable_size;      /* Total size minus BIOS reserved */
+       size_t stolen_reserved_base;
+       size_t stolen_reserved_size;
+       size_t size;                    /* Total size of Global GTT */
+       u64 mappable_end;               /* End offset that we can CPU map */
+       struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
+-      phys_addr_t mappable_base;      /* PA of our GMADR */
++      u64 mappable_base;              /* PA of our GMADR */
+       /** "Graphics Stolen Memory" holds the global PTEs */
+       void __iomem *gsm;
+diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
+index 97f3a56..32c712e 100644
+--- a/drivers/gpu/drm/i915/i915_ioc32.c
++++ b/drivers/gpu/drm/i915/i915_ioc32.c
+@@ -65,7 +65,7 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
+                        (unsigned long)request);
+ }
+-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
++static drm_ioctl_compat_t i915_compat_ioctls[] = {
+       [DRM_I915_GETPARAM] = compat_i915_getparam,
+ };
+@@ -81,17 +81,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
+-              fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
+-              ret = (*fn) (filp, cmd, arg);
++      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
++              ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 1c2aec3..f807515 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -4541,14 +4541,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+       INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+                         i915_hangcheck_elapsed);
++      pax_open_kernel();
+       if (IS_GEN2(dev_priv)) {
+               dev->max_vblank_count = 0;
+-              dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
++              const_cast(dev->driver->get_vblank_counter) = i8xx_get_vblank_counter;
+       } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+-              dev->driver->get_vblank_counter = g4x_get_vblank_counter;
++              const_cast(dev->driver->get_vblank_counter) = g4x_get_vblank_counter;
+       } else {
+-              dev->driver->get_vblank_counter = i915_get_vblank_counter;
++              const_cast(dev->driver->get_vblank_counter) = i915_get_vblank_counter;
+               dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+       }
+@@ -4560,32 +4561,32 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+       if (!IS_GEN2(dev_priv))
+               dev->vblank_disable_immediate = true;
+-      dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+-      dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
++      const_cast(dev->driver->get_vblank_timestamp) = i915_get_vblank_timestamp;
++      const_cast(dev->driver->get_scanout_position) = i915_get_crtc_scanoutpos;
+       if (IS_CHERRYVIEW(dev_priv)) {
+-              dev->driver->irq_handler = cherryview_irq_handler;
+-              dev->driver->irq_preinstall = cherryview_irq_preinstall;
+-              dev->driver->irq_postinstall = cherryview_irq_postinstall;
+-              dev->driver->irq_uninstall = cherryview_irq_uninstall;
+-              dev->driver->enable_vblank = valleyview_enable_vblank;
+-              dev->driver->disable_vblank = valleyview_disable_vblank;
++              const_cast(dev->driver->irq_handler) = cherryview_irq_handler;
++              const_cast(dev->driver->irq_preinstall) = cherryview_irq_preinstall;
++              const_cast(dev->driver->irq_postinstall) = cherryview_irq_postinstall;
++              const_cast(dev->driver->irq_uninstall) = cherryview_irq_uninstall;
++              const_cast(dev->driver->enable_vblank) = valleyview_enable_vblank;
++              const_cast(dev->driver->disable_vblank) = valleyview_disable_vblank;
+               dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+-              dev->driver->irq_handler = valleyview_irq_handler;
+-              dev->driver->irq_preinstall = valleyview_irq_preinstall;
+-              dev->driver->irq_postinstall = valleyview_irq_postinstall;
+-              dev->driver->irq_uninstall = valleyview_irq_uninstall;
+-              dev->driver->enable_vblank = valleyview_enable_vblank;
+-              dev->driver->disable_vblank = valleyview_disable_vblank;
++              const_cast(dev->driver->irq_handler) = valleyview_irq_handler;
++              const_cast(dev->driver->irq_preinstall) = valleyview_irq_preinstall;
++              const_cast(dev->driver->irq_postinstall) = valleyview_irq_postinstall;
++              const_cast(dev->driver->irq_uninstall) = valleyview_irq_uninstall;
++              const_cast(dev->driver->enable_vblank) = valleyview_enable_vblank;
++              const_cast(dev->driver->disable_vblank) = valleyview_disable_vblank;
+               dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+       } else if (INTEL_INFO(dev_priv)->gen >= 8) {
+-              dev->driver->irq_handler = gen8_irq_handler;
+-              dev->driver->irq_preinstall = gen8_irq_reset;
+-              dev->driver->irq_postinstall = gen8_irq_postinstall;
+-              dev->driver->irq_uninstall = gen8_irq_uninstall;
+-              dev->driver->enable_vblank = gen8_enable_vblank;
+-              dev->driver->disable_vblank = gen8_disable_vblank;
++              const_cast(dev->driver->irq_handler) = gen8_irq_handler;
++              const_cast(dev->driver->irq_preinstall) = gen8_irq_reset;
++              const_cast(dev->driver->irq_postinstall) = gen8_irq_postinstall;
++              const_cast(dev->driver->irq_uninstall) = gen8_irq_uninstall;
++              const_cast(dev->driver->enable_vblank) = gen8_enable_vblank;
++              const_cast(dev->driver->disable_vblank) = gen8_disable_vblank;
+               if (IS_BROXTON(dev))
+                       dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+               else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+@@ -4593,35 +4594,36 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+               else
+                       dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+       } else if (HAS_PCH_SPLIT(dev)) {
+-              dev->driver->irq_handler = ironlake_irq_handler;
+-              dev->driver->irq_preinstall = ironlake_irq_reset;
+-              dev->driver->irq_postinstall = ironlake_irq_postinstall;
+-              dev->driver->irq_uninstall = ironlake_irq_uninstall;
+-              dev->driver->enable_vblank = ironlake_enable_vblank;
+-              dev->driver->disable_vblank = ironlake_disable_vblank;
++              const_cast(dev->driver->irq_handler) = ironlake_irq_handler;
++              const_cast(dev->driver->irq_preinstall) = ironlake_irq_reset;
++              const_cast(dev->driver->irq_postinstall) = ironlake_irq_postinstall;
++              const_cast(dev->driver->irq_uninstall) = ironlake_irq_uninstall;
++              const_cast(dev->driver->enable_vblank) = ironlake_enable_vblank;
++              const_cast(dev->driver->disable_vblank) = ironlake_disable_vblank;
+               dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+       } else {
+               if (IS_GEN2(dev_priv)) {
+-                      dev->driver->irq_preinstall = i8xx_irq_preinstall;
+-                      dev->driver->irq_postinstall = i8xx_irq_postinstall;
+-                      dev->driver->irq_handler = i8xx_irq_handler;
+-                      dev->driver->irq_uninstall = i8xx_irq_uninstall;
++                      const_cast(dev->driver->irq_preinstall) = i8xx_irq_preinstall;
++                      const_cast(dev->driver->irq_postinstall) = i8xx_irq_postinstall;
++                      const_cast(dev->driver->irq_handler) = i8xx_irq_handler;
++                      const_cast(dev->driver->irq_uninstall) = i8xx_irq_uninstall;
+               } else if (IS_GEN3(dev_priv)) {
+-                      dev->driver->irq_preinstall = i915_irq_preinstall;
+-                      dev->driver->irq_postinstall = i915_irq_postinstall;
+-                      dev->driver->irq_uninstall = i915_irq_uninstall;
+-                      dev->driver->irq_handler = i915_irq_handler;
++                      const_cast(dev->driver->irq_preinstall) = i915_irq_preinstall;
++                      const_cast(dev->driver->irq_postinstall) = i915_irq_postinstall;
++                      const_cast(dev->driver->irq_uninstall) = i915_irq_uninstall;
++                      const_cast(dev->driver->irq_handler) = i915_irq_handler;
+               } else {
+-                      dev->driver->irq_preinstall = i965_irq_preinstall;
+-                      dev->driver->irq_postinstall = i965_irq_postinstall;
+-                      dev->driver->irq_uninstall = i965_irq_uninstall;
+-                      dev->driver->irq_handler = i965_irq_handler;
++                      const_cast(dev->driver->irq_preinstall) = i965_irq_preinstall;
++                      const_cast(dev->driver->irq_postinstall) = i965_irq_postinstall;
++                      const_cast(dev->driver->irq_uninstall) = i965_irq_uninstall;
++                      const_cast(dev->driver->irq_handler) = i965_irq_handler;
+               }
+               if (I915_HAS_HOTPLUG(dev_priv))
+                       dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+-              dev->driver->enable_vblank = i915_enable_vblank;
+-              dev->driver->disable_vblank = i915_disable_vblank;
++              const_cast(dev->driver->enable_vblank) = i915_enable_vblank;
++              const_cast(dev->driver->disable_vblank) = i915_disable_vblank;
+       }
++      pax_close_kernel();
+ }
+ /**
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e9a64fb..54a2344 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -15569,13 +15569,13 @@ struct intel_quirk {
+       int subsystem_vendor;
+       int subsystem_device;
+       void (*hook)(struct drm_device *dev);
+-};
++} __do_const;
+ /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+ struct intel_dmi_quirk {
+       void (*hook)(struct drm_device *dev);
+-      const struct dmi_system_id (*dmi_id_list)[];
+-};
++      const struct dmi_system_id *dmi_id_list;
++} __do_const;
+ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+ {
+@@ -15583,18 +15583,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+       return 1;
+ }
+-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
++static const struct dmi_system_id intel_dmi_quirks_table[] = {
+       {
+-              .dmi_id_list = &(const struct dmi_system_id[]) {
+-                      {
+-                              .callback = intel_dmi_reverse_brightness,
+-                              .ident = "NCR Corporation",
+-                              .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+-                                          DMI_MATCH(DMI_PRODUCT_NAME, ""),
+-                              },
+-                      },
+-                      { }  /* terminating entry */
++              .callback = intel_dmi_reverse_brightness,
++              .ident = "NCR Corporation",
++              .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
++                          DMI_MATCH(DMI_PRODUCT_NAME, ""),
+               },
++      },
++      { }  /* terminating entry */
++};
++
++static const struct intel_dmi_quirk intel_dmi_quirks[] = {
++      {
++              .dmi_id_list = intel_dmi_quirks_table,
+               .hook = quirk_invert_brightness,
+       },
+ };
+@@ -15677,7 +15679,7 @@ static void intel_init_quirks(struct drm_device *dev)
+                       q->hook(dev);
+       }
+       for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+-              if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
++              if (dmi_check_system(intel_dmi_quirks[i].dmi_id_list) != 0)
+                       intel_dmi_quirks[i].hook(dev);
+       }
+ }
+diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
+index 7bf90e9..30711b9 100644
+--- a/drivers/gpu/drm/imx/imx-drm-core.c
++++ b/drivers/gpu/drm/imx/imx-drm-core.c
+@@ -380,7 +380,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
+       if (imxdrm->pipes >= MAX_CRTC)
+               return -EINVAL;
+-      if (imxdrm->drm->open_count)
++      if (local_read(&imxdrm->drm->open_count))
+               return -EBUSY;
+       imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
+index 5e87594..98a690c 100644
+--- a/drivers/gpu/drm/imx/imx-tve.c
++++ b/drivers/gpu/drm/imx/imx-tve.c
+@@ -252,7 +252,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int imx_tve_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status imx_tve_connector_mode_valid(struct drm_connector *connector,
+                                       struct drm_display_mode *mode)
+ {
+       struct imx_tve *tve = con_to_tve(connector);
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 334562d..90fa448 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -1232,7 +1232,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
+       return ret;
+ }
+-static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
++static enum drm_mode_status mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
+                                   struct drm_display_mode *mode)
+ {
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
+index 5e2f131..c134c7c 100644
+--- a/drivers/gpu/drm/mga/mga_drv.c
++++ b/drivers/gpu/drm/mga/mga_drv.c
+@@ -92,7 +92,10 @@ static struct pci_driver mga_pci_driver = {
+ static int __init mga_init(void)
+ {
+-      driver.num_ioctls = mga_max_ioctl;
++      pax_open_kernel();
++      const_cast(driver.num_ioctls) = mga_max_ioctl;
++      pax_close_kernel();
++
+       return drm_pci_init(&driver, &mga_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
+index bb31233..75b39f0 100644
+--- a/drivers/gpu/drm/mga/mga_drv.h
++++ b/drivers/gpu/drm/mga/mga_drv.h
+@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
+       u32 clear_cmd;
+       u32 maccess;
+-      atomic_t vbl_received;          /**< Number of vblanks received. */
++      atomic_unchecked_t vbl_received;          /**< Number of vblanks received. */
+       wait_queue_head_t fence_queue;
+-      atomic_t last_fence_retired;
++      atomic_unchecked_t last_fence_retired;
+       u32 next_fence_to_post;
+       unsigned int fb_cpp;
+@@ -152,7 +152,7 @@ typedef struct drm_mga_private {
+ } drm_mga_private_t;
+ extern const struct drm_ioctl_desc mga_ioctls[];
+-extern int mga_max_ioctl;
++extern const int mga_max_ioctl;
+                               /* mga_dma.c */
+ extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
+index 729bfd5..14bae78 100644
+--- a/drivers/gpu/drm/mga/mga_ioc32.c
++++ b/drivers/gpu/drm/mga/mga_ioc32.c
+@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+       return 0;
+ }
+-drm_ioctl_compat_t *mga_compat_ioctls[] = {
++drm_ioctl_compat_t mga_compat_ioctls[] = {
+       [DRM_MGA_INIT] = compat_mga_init,
+       [DRM_MGA_GETPARAM] = compat_mga_getparam,
+       [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
+ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
+-              fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
+-              ret = (*fn) (filp, cmd, arg);
++      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
++              ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
+diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
+index 693ba70..465bcfc 100644
+--- a/drivers/gpu/drm/mga/mga_irq.c
++++ b/drivers/gpu/drm/mga/mga_irq.c
+@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+       if (pipe != 0)
+               return 0;
+-      return atomic_read(&dev_priv->vbl_received);
++      return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
+       /* VBLANK interrupt */
+       if (status & MGA_VLINEPEN) {
+               MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+-              atomic_inc(&dev_priv->vbl_received);
++              atomic_inc_unchecked(&dev_priv->vbl_received);
+               drm_handle_vblank(dev, 0);
+               handled = 1;
+       }
+@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
+               if ((prim_start & ~0x03) != (prim_end & ~0x03))
+                       MGA_WRITE(MGA_PRIMEND, prim_end);
+-              atomic_inc(&dev_priv->last_fence_retired);
++              atomic_inc_unchecked(&dev_priv->last_fence_retired);
+               wake_up(&dev_priv->fence_queue);
+               handled = 1;
+       }
+@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
+        * using fences.
+        */
+       DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
+-                  (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++                  (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
+                     - *sequence) <= (1 << 23)));
+       *sequence = cur_fence;
+diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
+index 792f924..aeb1334 100644
+--- a/drivers/gpu/drm/mga/mga_state.c
++++ b/drivers/gpu/drm/mga/mga_state.c
+@@ -1099,4 +1099,4 @@ const struct drm_ioctl_desc mga_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ };
+-int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
++const int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index 6b21cb2..90c2876 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -1574,7 +1574,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
+ #define MODE_BANDWIDTH        MODE_BAD
+-static int mga_vga_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+index c8d1f19..10d49d4 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+@@ -306,7 +306,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+       return num;
+ }
+-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+ {
+       int id = dsi_mgr_connector_get_id(connector);
+diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
+index 5960628..fe2e4de 100644
+--- a/drivers/gpu/drm/msm/edp/edp_connector.c
++++ b/drivers/gpu/drm/msm/edp/edp_connector.c
+@@ -63,7 +63,7 @@ static int edp_connector_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int edp_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status edp_connector_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct edp_connector *edp_connector = to_edp_connector(connector);
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+index a2515b4..cec0906 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+@@ -377,7 +377,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status msm_hdmi_connector_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+index ed7143d..527b26a 100644
+--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+@@ -647,9 +647,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+       dev->mode_config.max_width = config->hw->lm.max_width;
+       dev->mode_config.max_height = config->hw->lm.max_height;
+-      dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+-      dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+-      dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
++      pax_open_kernel();
++      const_cast(dev->driver->get_vblank_timestamp) = mdp5_get_vblank_timestamp;
++      const_cast(dev->driver->get_scanout_position) = mdp5_get_scanoutpos;
++      const_cast(dev->driver->get_vblank_counter) = mdp5_get_vblank_counter;
++      pax_close_kernel();
++
+       dev->max_vblank_count = 0xffffffff;
+       dev->vblank_disable_immediate = true;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index dc57b62..8f2a3d8 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -194,7 +194,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+       return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
+ }
+-static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
++static enum vga_switcheroo_client_id nouveau_dsm_get_client_id(struct pci_dev *pdev)
+ {
+       /* easy option one - intel vendor ID means Integrated */
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index a1570b1..0e3c08c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -964,7 +964,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_table {
+       const char id;
+       int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+-};
++} __no_const;
+ #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index c108408..575750a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -862,7 +862,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
+               return 112000;
+ }
+-static int
++static enum drm_mode_status
+ nouveau_connector_mode_valid(struct drm_connector *connector,
+                            struct drm_display_mode *mode)
+ {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 66c1280..580abef 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -80,9 +80,8 @@ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1
+ int nouveau_runtime_pm = -1;
+ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
+-static struct drm_driver driver_stub;
+ static struct drm_driver driver_pci;
+-static struct drm_driver driver_platform;
++static drm_driver_no_const driver_platform __read_only;
+ static u64
+ nouveau_pci_name(struct pci_dev *pdev)
+@@ -942,7 +941,7 @@ nouveau_driver_fops = {
+ };
+ static struct drm_driver
+-driver_stub = {
++driver_pci = {
+       .driver_features =
+               DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+               DRIVER_KMS_LEGACY_CONTEXT,
+@@ -954,6 +953,8 @@ driver_stub = {
+       .postclose = nouveau_drm_postclose,
+       .lastclose = nouveau_vga_lastclose,
++      .set_busid = drm_pci_set_busid,
++
+ #if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = nouveau_drm_debugfs_init,
+       .debugfs_cleanup = nouveau_drm_debugfs_cleanup,
+@@ -1086,9 +1087,10 @@ err_free:
+ static int __init
+ nouveau_drm_init(void)
+ {
+-      driver_pci = driver_stub;
+-      driver_pci.set_busid = drm_pci_set_busid;
+-      driver_platform = driver_stub;
++      pax_open_kernel();
++      driver_platform = driver_pci;
++      driver_platform.set_busid = NULL;
++      pax_close_kernel();
+       nouveau_display_options();
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index 822a021..a131e66 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -124,7 +124,6 @@ struct nouveau_drm {
+               struct drm_global_reference mem_global_ref;
+               struct ttm_bo_global_ref bo_global_ref;
+               struct ttm_bo_device bdev;
+-              atomic_t validate_sequence;
+               int (*move)(struct nouveau_channel *,
+                           struct ttm_buffer_object *,
+                           struct ttm_mem_reg *, struct ttm_mem_reg *);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+index 462679a..88e32a7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+                        unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
++      drm_ioctl_compat_t fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+index 1825dbc..c1ec287 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+@@ -107,10 +107,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ }
+ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+-      nouveau_vram_manager_init,
+-      nouveau_vram_manager_fini,
+-      nouveau_vram_manager_new,
+-      nouveau_vram_manager_del,
++      .init = nouveau_vram_manager_init,
++      .takedown = nouveau_vram_manager_fini,
++      .get_node = nouveau_vram_manager_new,
++      .put_node = nouveau_vram_manager_del,
+ };
+ static int
+@@ -184,11 +184,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+ }
+ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+-      nouveau_gart_manager_init,
+-      nouveau_gart_manager_fini,
+-      nouveau_gart_manager_new,
+-      nouveau_gart_manager_del,
+-      nouveau_gart_manager_debug
++      .init = nouveau_gart_manager_init,
++      .takedown = nouveau_gart_manager_fini,
++      .get_node = nouveau_gart_manager_new,
++      .put_node = nouveau_gart_manager_del,
++      .debug = nouveau_gart_manager_debug
+ };
+ /*XXX*/
+@@ -257,11 +257,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+ }
+ const struct ttm_mem_type_manager_func nv04_gart_manager = {
+-      nv04_gart_manager_init,
+-      nv04_gart_manager_fini,
+-      nv04_gart_manager_new,
+-      nv04_gart_manager_del,
+-      nv04_gart_manager_debug
++      .init = nv04_gart_manager_init,
++      .takedown = nv04_gart_manager_fini,
++      .get_node = nv04_gart_manager_new,
++      .put_node = nv04_gart_manager_del,
++      .debug = nv04_gart_manager_debug
+ };
+ int
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
+index c6a180a..c5c7855 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
+@@ -73,7 +73,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+        * locking inversion with the driver load path. And the access here is
+        * completely racy anyway. So don't bother with locking for now.
+        */
+-      return dev->open_count == 0;
++      return local_read(&dev->open_count) == 0;
+ }
+ static const struct vga_switcheroo_client_ops
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+index b2557e8..2d4f9f4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+@@ -151,11 +151,16 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
+       return (void *)fw;
+ }
++static void shadow_fw_fini(void *fw)
++{
++      release_firmware(fw);
++}
++
+ static const struct nvbios_source
+ shadow_fw = {
+       .name = "firmware",
+       .init = shadow_fw_init,
+-      .fini = (void(*)(void *))release_firmware,
++      .fini = shadow_fw_fini,
+       .read = shadow_fw_read,
+       .rw = false,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+index 9b91da0..b3fa90d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+@@ -111,11 +111,16 @@ platform_init(struct nvkm_bios *bios, const char *name)
+       return ERR_PTR(ret);
+ }
++static void platform_fini(void *data)
++{
++      kfree(data);
++}
++
+ const struct nvbios_source
+ nvbios_platform = {
+       .name = "PLATFORM",
+       .init = platform_init,
+-      .fini = (void(*)(void *))kfree,
++      .fini = platform_fini,
+       .read = pcirom_read,
+       .rw = true,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+index a9a8a0e..2ad6d62 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+@@ -226,8 +226,8 @@ struct gm200_secboot_func {
+ int gm200_secboot_init(struct nvkm_secboot *);
+ void *gm200_secboot_dtor(struct nvkm_secboot *);
+-int gm200_secboot_reset(struct nvkm_secboot *, u32);
+-int gm200_secboot_start(struct nvkm_secboot *, u32);
++int gm200_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
++int gm200_secboot_start(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+ int gm20x_secboot_prepare_blobs(struct gm200_secboot *);
+diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
+index 8dcdd7c..0e37527 100644
+--- a/drivers/gpu/drm/omapdrm/dss/display.c
++++ b/drivers/gpu/drm/omapdrm/dss/display.c
+@@ -112,12 +112,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
+       if (dssdev->name == NULL)
+               dssdev->name = dssdev->alias;
++      pax_open_kernel();
+       if (drv && drv->get_resolution == NULL)
+-              drv->get_resolution = omapdss_default_get_resolution;
++              const_cast(drv->get_resolution) = omapdss_default_get_resolution;
+       if (drv && drv->get_recommended_bpp == NULL)
+-              drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
++              const_cast(drv->get_recommended_bpp) = omapdss_default_get_recommended_bpp;
+       if (drv && drv->get_timings == NULL)
+-              drv->get_timings = omapdss_default_get_timings;
++              const_cast(drv->get_timings) = omapdss_default_get_timings;
++      pax_close_kernel();
+       mutex_lock(&panel_list_mutex);
+       list_add_tail(&dssdev->panel_list, &panel_list);
+diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
+index 137fe69..bbfc18c 100644
+--- a/drivers/gpu/drm/omapdrm/omap_connector.c
++++ b/drivers/gpu/drm/omapdrm/omap_connector.c
+@@ -201,7 +201,7 @@ static int omap_connector_get_modes(struct drm_connector *connector)
+       return n;
+ }
+-static int omap_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+ {
+       struct omap_connector *omap_connector = to_omap_connector(connector);
+diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
+index 04270f5..7688e90 100644
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
+       int ret;
+       mutex_lock(&qdev->async_io_mutex);
+-      irq_num = atomic_read(&qdev->irq_received_io_cmd);
++      irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
+       if (qdev->last_sent_io_cmd > irq_num) {
+               if (intr)
+                       ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+-                                                             atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
++                                                             atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+               else
+                       ret = wait_event_timeout(qdev->io_cmd_event,
+-                                               atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
++                                               atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+               /* 0 is timeout, just bail the "hw" has gone away */
+               if (ret <= 0)
+                       goto out;
+-              irq_num = atomic_read(&qdev->irq_received_io_cmd);
++              irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
+       }
+       outb(val, addr);
+       qdev->last_sent_io_cmd = irq_num + 1;
+       if (intr)
+               ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+-                                                     atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
++                                                     atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+       else
+               ret = wait_event_timeout(qdev->io_cmd_event,
+-                                       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
++                                       atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+ out:
+       if (ret > 0)
+               ret = 0;
+diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
+index 6911b8c..89d6867 100644
+--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
++++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
+@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct qxl_device *qdev = node->minor->dev->dev_private;
+-      seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+-      seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+-      seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+-      seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
++      seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
++      seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
++      seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
++      seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
+       seq_printf(m, "%d\n", qdev->irq_received_error);
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 3aef127..9ea7666 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -826,7 +826,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int qxl_conn_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+ {
+       struct drm_device *ddev = connector->dev;
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index 460bbce..abeb896 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -37,7 +37,7 @@
+ #include "qxl_drv.h"
+ #include "qxl_object.h"
+-extern int qxl_max_ioctls;
++extern const int qxl_max_ioctls;
+ static const struct pci_device_id pciidlist[] = {
+       { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+         0xffff00, 0 },
+@@ -277,7 +277,11 @@ static int __init qxl_init(void)
+       if (qxl_modeset == 0)
+               return -EINVAL;
+-      qxl_driver.num_ioctls = qxl_max_ioctls;
++
++      pax_open_kernel();
++      const_cast(qxl_driver.num_ioctls) = qxl_max_ioctls;
++      pax_close_kernel();
++
+       return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 8e633ca..64debeb 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -292,10 +292,10 @@ struct qxl_device {
+       unsigned int last_sent_io_cmd;
+       /* interrupt handling */
+-      atomic_t irq_received;
+-      atomic_t irq_received_display;
+-      atomic_t irq_received_cursor;
+-      atomic_t irq_received_io_cmd;
++      atomic_unchecked_t irq_received;
++      atomic_unchecked_t irq_received_display;
++      atomic_unchecked_t irq_received_cursor;
++      atomic_unchecked_t irq_received_io_cmd;
+       unsigned irq_received_error;
+       wait_queue_head_t display_event;
+       wait_queue_head_t cursor_event;
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 5a4c8c4..faf4c73 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -183,7 +183,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+       /* TODO copy slow path code from i915 */
+       fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+-      unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
++      unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
+       {
+               struct qxl_drawable *draw = fb_cmd;
+@@ -203,7 +203,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+               struct drm_qxl_reloc reloc;
+               if (copy_from_user(&reloc,
+-                                     &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
++                                     &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
+                                      sizeof(reloc))) {
+                       ret = -EFAULT;
+                       goto out_free_bos;
+@@ -282,10 +282,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+       for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+-              struct drm_qxl_command *commands =
+-                      (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
++              struct drm_qxl_command __user *commands =
++                      (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
+-              if (copy_from_user(&user_cmd, &commands[cmd_num],
++              if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
+                                      sizeof(user_cmd)))
+                       return -EFAULT;
+@@ -439,4 +439,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
+                         DRM_AUTH),
+ };
+-int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
++const int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
+diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
+index 0bf1e20..42a7310 100644
+--- a/drivers/gpu/drm/qxl/qxl_irq.c
++++ b/drivers/gpu/drm/qxl/qxl_irq.c
+@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
+       if (!pending)
+               return IRQ_NONE;
+-      atomic_inc(&qdev->irq_received);
++      atomic_inc_unchecked(&qdev->irq_received);
+       if (pending & QXL_INTERRUPT_DISPLAY) {
+-              atomic_inc(&qdev->irq_received_display);
++              atomic_inc_unchecked(&qdev->irq_received_display);
+               wake_up_all(&qdev->display_event);
+               qxl_queue_garbage_collect(qdev, false);
+       }
+       if (pending & QXL_INTERRUPT_CURSOR) {
+-              atomic_inc(&qdev->irq_received_cursor);
++              atomic_inc_unchecked(&qdev->irq_received_cursor);
+               wake_up_all(&qdev->cursor_event);
+       }
+       if (pending & QXL_INTERRUPT_IO_CMD) {
+-              atomic_inc(&qdev->irq_received_io_cmd);
++              atomic_inc_unchecked(&qdev->irq_received_io_cmd);
+               wake_up_all(&qdev->io_cmd_event);
+       }
+       if (pending & QXL_INTERRUPT_ERROR) {
+@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
+       init_waitqueue_head(&qdev->io_cmd_event);
+       INIT_WORK(&qdev->client_monitors_config_work,
+                 qxl_client_monitors_config_work_func);
+-      atomic_set(&qdev->irq_received, 0);
+-      atomic_set(&qdev->irq_received_display, 0);
+-      atomic_set(&qdev->irq_received_cursor, 0);
+-      atomic_set(&qdev->irq_received_io_cmd, 0);
++      atomic_set_unchecked(&qdev->irq_received, 0);
++      atomic_set_unchecked(&qdev->irq_received_display, 0);
++      atomic_set_unchecked(&qdev->irq_received_cursor, 0);
++      atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
+       qdev->irq_received_error = 0;
+       ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
+       qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
+index d50c967..f96d908 100644
+--- a/drivers/gpu/drm/qxl/qxl_ttm.c
++++ b/drivers/gpu/drm/qxl/qxl_ttm.c
+@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
+       }
+ }
+-static struct vm_operations_struct qxl_ttm_vm_ops;
++static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
+ static const struct vm_operations_struct *ttm_vm_ops;
+ static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+               return r;
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
++              pax_open_kernel();
+               qxl_ttm_vm_ops = *ttm_vm_ops;
+               qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
++              pax_close_kernel();
+       }
+       vma->vm_ops = &qxl_ttm_vm_ops;
+       return 0;
+@@ -474,25 +476,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
+ static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+ {
+ #if defined(CONFIG_DEBUG_FS)
+-      static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+-      static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+-      unsigned i;
++      static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
++              {
++                      .name = "qxl_mem_mm",
++                      .show = &qxl_mm_dump_table,
++              },
++              {
++                      .name = "qxl_surf_mm",
++                      .show = &qxl_mm_dump_table,
++              }
++      };
+-      for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+-              if (i == 0)
+-                      sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+-              else
+-                      sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+-              qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+-              qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+-              qxl_mem_types_list[i].driver_features = 0;
+-              if (i == 0)
+-                      qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+-              else
+-                      qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
++      pax_open_kernel();
++      const_cast(qxl_mem_types_list[0].data) = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
++      const_cast(qxl_mem_types_list[1].data) = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
++      pax_close_kernel();
+-      }
+-      return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
++      return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
+ #else
+       return 0;
+ #endif
+diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
+index 14fd83b5f..b2acbd19 100644
+--- a/drivers/gpu/drm/r128/r128_cce.c
++++ b/drivers/gpu/drm/r128/r128_cce.c
+@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
+       /* GH: Simple idle check.
+        */
+-      atomic_set(&dev_priv->idle_count, 0);
++      atomic_set_unchecked(&dev_priv->idle_count, 0);
+       /* We don't support anything other than bus-mastering ring mode,
+        * but the ring can be in either AGP or PCI space for the ring
+diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
+index c57b4de..1a875fb 100644
+--- a/drivers/gpu/drm/r128/r128_drv.c
++++ b/drivers/gpu/drm/r128/r128_drv.c
+@@ -94,7 +94,9 @@ static struct pci_driver r128_pci_driver = {
+ static int __init r128_init(void)
+ {
+-      driver.num_ioctls = r128_max_ioctl;
++      pax_open_kernel();
++      const_cast(driver.num_ioctls) = r128_max_ioctl;
++      pax_close_kernel();
+       return drm_pci_init(&driver, &r128_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
+index 09143b8..86c8394 100644
+--- a/drivers/gpu/drm/r128/r128_drv.h
++++ b/drivers/gpu/drm/r128/r128_drv.h
+@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
+       int is_pci;
+       unsigned long cce_buffers_offset;
+-      atomic_t idle_count;
++      atomic_unchecked_t idle_count;
+       int page_flipping;
+       int current_page;
+       u32 crtc_offset;
+       u32 crtc_offset_cntl;
+-      atomic_t vbl_received;
++      atomic_unchecked_t vbl_received;
+       u32 color_fmt;
+       unsigned int front_offset;
+@@ -135,7 +135,7 @@ typedef struct drm_r128_buf_priv {
+ } drm_r128_buf_priv_t;
+ extern const struct drm_ioctl_desc r128_ioctls[];
+-extern int r128_max_ioctl;
++extern const int r128_max_ioctl;
+                               /* r128_cce.c */
+ extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
+index 663f38c..ec159a1 100644
+--- a/drivers/gpu/drm/r128/r128_ioc32.c
++++ b/drivers/gpu/drm/r128/r128_ioc32.c
+@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
+       return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ }
+-drm_ioctl_compat_t *r128_compat_ioctls[] = {
++drm_ioctl_compat_t r128_compat_ioctls[] = {
+       [DRM_R128_INIT] = compat_r128_init,
+       [DRM_R128_DEPTH] = compat_r128_depth,
+       [DRM_R128_STIPPLE] = compat_r128_stipple,
+@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
+ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
+-              fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
+-              ret = (*fn) (filp, cmd, arg);
++      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
++              ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
+diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
+index 9730f49..920e9bf 100644
+--- a/drivers/gpu/drm/r128/r128_irq.c
++++ b/drivers/gpu/drm/r128/r128_irq.c
+@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+       if (pipe != 0)
+               return 0;
+-      return atomic_read(&dev_priv->vbl_received);
++      return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
+@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
+       /* VBLANK interrupt */
+       if (status & R128_CRTC_VBLANK_INT) {
+               R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+-              atomic_inc(&dev_priv->vbl_received);
++              atomic_inc_unchecked(&dev_priv->vbl_received);
+               drm_handle_vblank(dev, 0);
+               return IRQ_HANDLED;
+       }
+diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
+index 8fd2d9f..4e99166 100644
+--- a/drivers/gpu/drm/r128/r128_state.c
++++ b/drivers/gpu/drm/r128/r128_state.c
+@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
+ static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
+ {
+-      if (atomic_read(&dev_priv->idle_count) == 0)
++      if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
+               r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+       else
+-              atomic_set(&dev_priv->idle_count, 0);
++              atomic_set_unchecked(&dev_priv->idle_count, 0);
+ }
+ #endif
+@@ -1641,4 +1641,4 @@ const struct drm_ioctl_desc r128_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
+ };
+-int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
++const int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
+diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
+index b928c17..e5d9400 100644
+--- a/drivers/gpu/drm/radeon/mkregtable.c
++++ b/drivers/gpu/drm/radeon/mkregtable.c
+@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
+       regex_t mask_rex;
+       regmatch_t match[4];
+       char buf[1024];
+-      size_t end;
++      long end;
+       int len;
+       int done = 0;
+       int r;
+       unsigned o;
+       struct offset *offset;
+       char last_reg_s[10];
+-      int last_reg;
++      unsigned long last_reg;
+       if (regcomp
+           (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index ddef0d4..c4f3351 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -516,7 +516,7 @@ static int radeon_atpx_init(void)
+  * look up whether we are the integrated or discrete GPU (all asics).
+  * Returns the client id.
+  */
+-static int radeon_atpx_get_client_id(struct pci_dev *pdev)
++static enum vga_switcheroo_client_id radeon_atpx_get_client_id(struct pci_dev *pdev)
+ {
+       if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
+               return VGA_SWITCHEROO_IGD;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index b79f3b0..a1fd177 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -857,7 +857,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int radeon_lvds_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
+                                 struct drm_display_mode *mode)
+ {
+       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+@@ -1000,7 +1000,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int radeon_vga_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
+                                 struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+@@ -1139,7 +1139,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
+       return 1;
+ }
+-static int radeon_tv_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+ {
+       if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+@@ -1470,7 +1470,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
+               radeon_connector->use_digital = true;
+ }
+-static int radeon_dvi_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
+                                 struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+@@ -1767,7 +1767,7 @@ out:
+       return ret;
+ }
+-static int radeon_dp_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
+                                 struct drm_display_mode *mode)
+ {
+       struct drm_device *dev = connector->dev;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 554ca71..e573a41 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1276,7 +1276,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+        * locking inversion with the driver load path. And the access here is
+        * completely racy anyway. So don't bother with locking for now.
+        */
+-      return dev->open_count == 0;
++      return local_read(&dev->open_count) == 0;
+ }
+ static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index c01a7c6..fd62ace 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -134,7 +134,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
+                                     const struct drm_display_mode *mode);
+ extern bool radeon_is_px(struct drm_device *dev);
+ extern const struct drm_ioctl_desc radeon_ioctls_kms[];
+-extern int radeon_max_kms_ioctl;
++extern const int radeon_max_kms_ioctl;
+ int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+ int radeon_mode_dumb_mmap(struct drm_file *filp,
+                         struct drm_device *dev,
+@@ -516,7 +516,7 @@ static struct drm_driver kms_driver = {
+       .driver_features =
+           DRIVER_USE_AGP |
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+-          DRIVER_PRIME | DRIVER_RENDER,
++          DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
+       .load = radeon_driver_load_kms,
+       .open = radeon_driver_open_kms,
+       .preclose = radeon_driver_preclose_kms,
+@@ -591,8 +591,11 @@ static int __init radeon_init(void)
+               DRM_INFO("radeon kernel modesetting enabled.\n");
+               driver = &kms_driver;
+               pdriver = &radeon_kms_pci_driver;
+-              driver->driver_features |= DRIVER_MODESET;
+-              driver->num_ioctls = radeon_max_kms_ioctl;
++
++              pax_open_kernel();
++              const_cast(driver->num_ioctls) = radeon_max_kms_ioctl;
++              pax_close_kernel();
++
+               radeon_register_atpx_handler();
+       } else {
+diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
+index 0b98ea1..a3c770f 100644
+--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
+@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+       request = compat_alloc_user_space(sizeof(*request));
+       if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+           || __put_user(req32.param, &request->param)
+-          || __put_user((void __user *)(unsigned long)req32.value,
++          || __put_user((unsigned long)req32.value,
+                         &request->value))
+               return -EFAULT;
+@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ #define compat_radeon_cp_setparam NULL
+ #endif /* X86_64 || IA64 */
+-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
++static drm_ioctl_compat_t radeon_compat_ioctls[] = {
+       [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+       [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+       [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+       unsigned int nr = DRM_IOCTL_NR(cmd);
+-      drm_ioctl_compat_t *fn = NULL;
+       int ret;
+       if (nr < DRM_COMMAND_BASE)
+               return drm_compat_ioctl(filp, cmd, arg);
+-      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
+-              fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+-      if (fn != NULL)
+-              ret = (*fn) (filp, cmd, arg);
++      if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
++              ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
+       else
+               ret = drm_ioctl(filp, cmd, arg);
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 835563c..85913cc 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -825,7 +825,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+  * Enable the interrupt on the requested crtc (all asics).
+  * Returns 0 on success, -EINVAL on failure.
+  */
+-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
++int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int crtc)
+ {
+       struct radeon_device *rdev = dev->dev_private;
+       unsigned long irqflags;
+@@ -851,7 +851,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+  *
+  * Disable the interrupt on the requested crtc (all asics).
+  */
+-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
++void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int crtc)
+ {
+       struct radeon_device *rdev = dev->dev_private;
+       unsigned long irqflags;
+@@ -880,7 +880,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+  * scanout position.  (all asics).
+  * Returns postive status flags on success, negative error on failure.
+  */
+-int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
++int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int crtc,
+                                   int *max_error,
+                                   struct timeval *vblank_time,
+                                   unsigned flags)
+@@ -949,4 +949,4 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ };
+-int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
++const int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index c2e0a1c..6270bca 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -974,7 +974,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+       man->size = size >> PAGE_SHIFT;
+ }
+-static struct vm_operations_struct radeon_ttm_vm_ops;
++static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
+ static const struct vm_operations_struct *ttm_vm_ops = NULL;
+ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -1015,8 +1015,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+       }
+       if (unlikely(ttm_vm_ops == NULL)) {
+               ttm_vm_ops = vma->vm_ops;
++              pax_open_kernel();
+               radeon_ttm_vm_ops = *ttm_vm_ops;
+               radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
++              pax_close_kernel();
+       }
+       vma->vm_ops = &radeon_ttm_vm_ops;
+       return 0;
+diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
+index d47dff9..0752202 100644
+--- a/drivers/gpu/drm/savage/savage_bci.c
++++ b/drivers/gpu/drm/savage/savage_bci.c
+@@ -1080,4 +1080,4 @@ const struct drm_ioctl_desc savage_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+ };
+-int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);
++const int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);
+diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
+index 21aed1f..85d23a0 100644
+--- a/drivers/gpu/drm/savage/savage_drv.c
++++ b/drivers/gpu/drm/savage/savage_drv.c
+@@ -76,7 +76,10 @@ static struct pci_driver savage_pci_driver = {
+ static int __init savage_init(void)
+ {
+-      driver.num_ioctls = savage_max_ioctl;
++      pax_open_kernel();
++      const_cast(driver.num_ioctls) = savage_max_ioctl;
++      pax_close_kernel();
++
+       return drm_pci_init(&driver, &savage_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
+index 37b6995..9b31aaf 100644
+--- a/drivers/gpu/drm/savage/savage_drv.h
++++ b/drivers/gpu/drm/savage/savage_drv.h
+@@ -107,7 +107,7 @@ enum savage_family {
+ };
+ extern const struct drm_ioctl_desc savage_ioctls[];
+-extern int savage_max_ioctl;
++extern const int savage_max_ioctl;
+ #define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
+diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
+index 79bce76..6c02219 100644
+--- a/drivers/gpu/drm/sis/sis_drv.c
++++ b/drivers/gpu/drm/sis/sis_drv.c
+@@ -128,7 +128,10 @@ static struct pci_driver sis_pci_driver = {
+ static int __init sis_init(void)
+ {
+-      driver.num_ioctls = sis_max_ioctl;
++      pax_open_kernel();
++      const_cast(driver.num_ioctls) = sis_max_ioctl;
++      pax_close_kernel();
++
+       return drm_pci_init(&driver, &sis_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
+index 328f8a7..0cfcf55 100644
+--- a/drivers/gpu/drm/sis/sis_drv.h
++++ b/drivers/gpu/drm/sis/sis_drv.h
+@@ -77,6 +77,6 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
+ extern void sis_lastclose(struct drm_device *dev);
+ extern const struct drm_ioctl_desc sis_ioctls[];
+-extern int sis_max_ioctl;
++extern const int sis_max_ioctl;
+ #endif
+diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
+index 03defda..6f56b68 100644
+--- a/drivers/gpu/drm/sis/sis_mm.c
++++ b/drivers/gpu/drm/sis/sis_mm.c
+@@ -359,4 +359,4 @@ const struct drm_ioctl_desc sis_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ };
+-int sis_max_ioctl = ARRAY_SIZE(sis_ioctls);
++const int sis_max_ioctl = ARRAY_SIZE(sis_ioctls);
+diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
+index 3b53f7f..b0576c2 100644
+--- a/drivers/gpu/drm/sti/sti_cursor.c
++++ b/drivers/gpu/drm/sti/sti_cursor.c
+@@ -126,7 +126,7 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-static struct drm_info_list cursor_debugfs_files[] = {
++static drm_info_list_no_const cursor_debugfs_files[] __read_only = {
+       { "cursor", cursor_dbg_show, 0, NULL },
+ };
+@@ -135,8 +135,10 @@ static int cursor_debugfs_init(struct sti_cursor *cursor,
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
+               cursor_debugfs_files[i].data = cursor;
++      pax_close_kernel();
+       return drm_debugfs_create_files(cursor_debugfs_files,
+                                       ARRAY_SIZE(cursor_debugfs_files),
+diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
+index 00881eb..3863e51 100644
+--- a/drivers/gpu/drm/sti/sti_dvo.c
++++ b/drivers/gpu/drm/sti/sti_dvo.c
+@@ -190,7 +190,7 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-static struct drm_info_list dvo_debugfs_files[] = {
++static drm_info_list_no_const dvo_debugfs_files[] __read_only = {
+       { "dvo", dvo_dbg_show, 0, NULL },
+ };
+@@ -205,8 +205,10 @@ static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
+               dvo_debugfs_files[i].data = dvo;
++      pax_close_kernel();
+       return drm_debugfs_create_files(dvo_debugfs_files,
+                                       ARRAY_SIZE(dvo_debugfs_files),
+diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
+index b8d942c..476893d 100644
+--- a/drivers/gpu/drm/sti/sti_gdp.c
++++ b/drivers/gpu/drm/sti/sti_gdp.c
+@@ -284,22 +284,22 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
+       return 0;
+ }
+-static struct drm_info_list gdp0_debugfs_files[] = {
++static drm_info_list_no_const gdp0_debugfs_files[] __read_only = {
+       { "gdp0", gdp_dbg_show, 0, NULL },
+       { "gdp0_node", gdp_node_dbg_show, 0, NULL },
+ };
+-static struct drm_info_list gdp1_debugfs_files[] = {
++static drm_info_list_no_const gdp1_debugfs_files[] __read_only = {
+       { "gdp1", gdp_dbg_show, 0, NULL },
+       { "gdp1_node", gdp_node_dbg_show, 0, NULL },
+ };
+-static struct drm_info_list gdp2_debugfs_files[] = {
++static drm_info_list_no_const gdp2_debugfs_files[] __read_only = {
+       { "gdp2", gdp_dbg_show, 0, NULL },
+       { "gdp2_node", gdp_node_dbg_show, 0, NULL },
+ };
+-static struct drm_info_list gdp3_debugfs_files[] = {
++static drm_info_list_no_const gdp3_debugfs_files[] __read_only = {
+       { "gdp3", gdp_dbg_show, 0, NULL },
+       { "gdp3_node", gdp_node_dbg_show, 0, NULL },
+ };
+@@ -307,7 +307,7 @@ static struct drm_info_list gdp3_debugfs_files[] = {
+ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
+ {
+       unsigned int i;
+-      struct drm_info_list *gdp_debugfs_files;
++      drm_info_list_no_const *gdp_debugfs_files;
+       int nb_files;
+       switch (gdp->plane.desc) {
+@@ -331,8 +331,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
+               return -EINVAL;
+       }
++      pax_open_kernel();
+       for (i = 0; i < nb_files; i++)
+               gdp_debugfs_files[i].data = gdp;
++      pax_close_kernel();
+       return drm_debugfs_create_files(gdp_debugfs_files,
+                                       nb_files,
+diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
+index 8505569..aae4422 100644
+--- a/drivers/gpu/drm/sti/sti_hda.c
++++ b/drivers/gpu/drm/sti/sti_hda.c
+@@ -394,7 +394,7 @@ static int hda_dbg_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-static struct drm_info_list hda_debugfs_files[] = {
++static drm_info_list_no_const hda_debugfs_files[] __read_only = {
+       { "hda", hda_dbg_show, 0, NULL },
+ };
+@@ -409,8 +409,10 @@ static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
+               hda_debugfs_files[i].data = hda;
++      pax_close_kernel();
+       return drm_debugfs_create_files(hda_debugfs_files,
+                                       ARRAY_SIZE(hda_debugfs_files),
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index fedc17f..d43c181 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -728,7 +728,7 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-static struct drm_info_list hdmi_debugfs_files[] = {
++static drm_info_list_no_const hdmi_debugfs_files[] __read_only = {
+       { "hdmi", hdmi_dbg_show, 0, NULL },
+ };
+@@ -743,8 +743,10 @@ static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
+               hdmi_debugfs_files[i].data = hdmi;
++      pax_close_kernel();
+       return drm_debugfs_create_files(hdmi_debugfs_files,
+                                       ARRAY_SIZE(hdmi_debugfs_files),
+diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
+index b5ee783..6d45c45 100644
+--- a/drivers/gpu/drm/sti/sti_hqvdp.c
++++ b/drivers/gpu/drm/sti/sti_hqvdp.c
+@@ -627,7 +627,7 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-static struct drm_info_list hqvdp_debugfs_files[] = {
++static drm_info_list_no_const hqvdp_debugfs_files[] __read_only = {
+       { "hqvdp", hqvdp_dbg_show, 0, NULL },
+ };
+@@ -635,8 +635,10 @@ static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
+               hqvdp_debugfs_files[i].data = hqvdp;
++      pax_close_kernel();
+       return drm_debugfs_create_files(hqvdp_debugfs_files,
+                                       ARRAY_SIZE(hqvdp_debugfs_files),
+diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
+index 7d9aea8..e0542f6 100644
+--- a/drivers/gpu/drm/sti/sti_mixer.c
++++ b/drivers/gpu/drm/sti/sti_mixer.c
+@@ -173,18 +173,18 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
+       return 0;
+ }
+-static struct drm_info_list mixer0_debugfs_files[] = {
++static drm_info_list_no_const mixer0_debugfs_files[] __read_only = {
+       { "mixer_main", mixer_dbg_show, 0, NULL },
+ };
+-static struct drm_info_list mixer1_debugfs_files[] = {
++static drm_info_list_no_const mixer1_debugfs_files[] __read_only = {
+       { "mixer_aux", mixer_dbg_show, 0, NULL },
+ };
+ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+ {
+       unsigned int i;
+-      struct drm_info_list *mixer_debugfs_files;
++      drm_info_list_no_const *mixer_debugfs_files;
+       int nb_files;
+       switch (mixer->id) {
+@@ -200,8 +200,10 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+               return -EINVAL;
+       }
++      pax_open_kernel();
+       for (i = 0; i < nb_files; i++)
+               mixer_debugfs_files[i].data = mixer;
++      pax_close_kernel();
+       return drm_debugfs_create_files(mixer_debugfs_files,
+                                       nb_files,
+diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
+index e25995b..8f2b12f 100644
+--- a/drivers/gpu/drm/sti/sti_tvout.c
++++ b/drivers/gpu/drm/sti/sti_tvout.c
+@@ -585,7 +585,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-static struct drm_info_list tvout_debugfs_files[] = {
++static drm_info_list_no_const tvout_debugfs_files[] __read_only = {
+       { "tvout", tvout_dbg_show, 0, NULL },
+ };
+@@ -600,8 +600,10 @@ static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
+               tvout_debugfs_files[i].data = tvout;
++      pax_close_kernel();
+       return drm_debugfs_create_files(tvout_debugfs_files,
+                                       ARRAY_SIZE(tvout_debugfs_files),
+diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
+index 47634a0..7a9b19f5 100644
+--- a/drivers/gpu/drm/sti/sti_vid.c
++++ b/drivers/gpu/drm/sti/sti_vid.c
+@@ -119,7 +119,7 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
+       return 0;
+ }
+-static struct drm_info_list vid_debugfs_files[] = {
++static drm_info_list_no_const vid_debugfs_files[] __read_only = {
+       { "vid", vid_dbg_show, 0, NULL },
+ };
+@@ -127,8 +127,10 @@ int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+ {
+       unsigned int i;
++      pax_open_kernel();
+       for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
+               vid_debugfs_files[i].data = vid;
++      pax_close_kernel();
+       return drm_debugfs_create_files(vid_debugfs_files,
+                                       ARRAY_SIZE(vid_debugfs_files),
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 8495bd0..21a9725 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -1685,7 +1685,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+       }
+       for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+-              dc->debugfs_files[i].data = dc;
++              const_cast(dc->debugfs_files[i].data) = dc;
+       err = drm_debugfs_create_files(dc->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 3dea121..c2b888e 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -63,7 +63,7 @@ struct tegra_dsi {
+       struct clk *clk_lp;
+       struct clk *clk;
+-      struct drm_info_list *debugfs_files;
++      drm_info_list_no_const *debugfs_files;
+       struct drm_minor *minor;
+       struct dentry *debugfs;
+diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
+index cda0491..869916e 100644
+--- a/drivers/gpu/drm/tegra/hdmi.c
++++ b/drivers/gpu/drm/tegra/hdmi.c
+@@ -74,7 +74,7 @@ struct tegra_hdmi {
+       bool stereo;
+       bool dvi;
+-      struct drm_info_list *debugfs_files;
++      drm_info_list_no_const *debugfs_files;
+       struct drm_minor *minor;
+       struct dentry *debugfs;
+ };
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index 74d0540..f5277db 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -1263,8 +1263,11 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor,
+               goto remove;
+       }
+-      for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+-              sor->debugfs_files[i].data = sor;
++      for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) {
++              pax_open_kernel();
++              const_cast(sor->debugfs_files[i].data) = sor;
++              pax_close_kernel();
++      }
+       err = drm_debugfs_create_files(sor->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
+index deeca48..54e1b6c 100644
+--- a/drivers/gpu/drm/tilcdc/Makefile
++++ b/drivers/gpu/drm/tilcdc/Makefile
+@@ -1,7 +1,7 @@
+ ccflags-y := -Iinclude/drm
+-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+-      ccflags-y += -Werror
+-endif
++#ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
++#     ccflags-y += -Werror
++#endif
+ obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
+                                        tilcdc_slave_compat.dtb.o
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
+index 03acb4f..8d4328e 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
+@@ -27,7 +27,7 @@ static const struct tilcdc_panel_info panel_info_tda998x = {
+               .raster_order           = 0,
+ };
+-static int tilcdc_external_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status tilcdc_external_mode_valid(struct drm_connector *connector,
+                                     struct drm_display_mode *mode)
+ {
+       struct tilcdc_drm_private *priv = connector->dev->dev_private;
+@@ -56,7 +56,7 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
+                                      struct drm_connector *connector)
+ {
+       struct tilcdc_drm_private *priv = dev->dev_private;
+-      struct drm_connector_helper_funcs *connector_funcs;
++      drm_connector_helper_funcs_no_const *connector_funcs;
+       priv->connectors[priv->num_connectors] = connector;
+       priv->encoders[priv->num_encoders++] = connector->encoder;
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+index ff7774c..697a5fc 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+@@ -179,7 +179,7 @@ static int panel_connector_get_modes(struct drm_connector *connector)
+       return i;
+ }
+-static int panel_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status panel_connector_mode_valid(struct drm_connector *connector,
+                 struct drm_display_mode *mode)
+ {
+       struct tilcdc_drm_private *priv = connector->dev->dev_private;
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+index 6b8c5b3..0899e85 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+@@ -184,7 +184,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int tfp410_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status tfp410_connector_mode_valid(struct drm_connector *connector,
+                 struct drm_display_mode *mode)
+ {
+       struct tilcdc_drm_private *priv = connector->dev->dev_private;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
+index aa0bd054..aea6a01 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
+@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ }
+ const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+-      ttm_bo_man_init,
+-      ttm_bo_man_takedown,
+-      ttm_bo_man_get_node,
+-      ttm_bo_man_put_node,
+-      ttm_bo_man_debug
++      .init = ttm_bo_man_init,
++      .takedown = ttm_bo_man_takedown,
++      .get_node = ttm_bo_man_get_node,
++      .put_node = ttm_bo_man_put_node,
++      .debug = ttm_bo_man_debug
+ };
+ EXPORT_SYMBOL(ttm_bo_manager_func);
+diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+index a1803fb..c53f6b0 100644
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+       zone->glob = glob;
+       glob->zone_kernel = zone;
+       ret = kobject_init_and_add(
+-              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
+       if (unlikely(ret != 0)) {
+               kobject_put(&zone->kobj);
+               return ret;
+@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+       zone->glob = glob;
+       glob->zone_dma32 = zone;
+       ret = kobject_init_and_add(
+-              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++              &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
+       if (unlikely(ret != 0)) {
+               kobject_put(&zone->kobj);
+               return ret;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index a37de5d..4a0db00 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -54,7 +54,7 @@
+ #define NUM_PAGES_TO_ALLOC            (PAGE_SIZE/sizeof(struct page *))
+ #define SMALL_ALLOCATION              16
+-#define FREE_ALL_PAGES                        (~0U)
++#define FREE_ALL_PAGES                        (~0UL)
+ /* times are in msecs */
+ #define PAGE_FREE_INTERVAL            1000
+@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+  * @free_all: If set to true will free all pages in pool
+  * @use_static: Safe to use static buffer
+  **/
+-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
++static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
+                             bool use_static)
+ {
+       static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+       unsigned long irq_flags;
+       struct page *p;
+       struct page **pages_to_free;
+-      unsigned freed_pages = 0,
+-               npages_to_free = nr_free;
++      unsigned long freed_pages = 0, npages_to_free = nr_free;
+       if (NUM_PAGES_TO_ALLOC < nr_free)
+               npages_to_free = NUM_PAGES_TO_ALLOC;
+@@ -371,7 +370,8 @@ restart:
+               __list_del(&p->lru, &pool->list);
+               ttm_pool_update_free_locked(pool, freed_pages);
+-              nr_free -= freed_pages;
++              if (likely(nr_free != FREE_ALL_PAGES))
++                      nr_free -= freed_pages;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+       unsigned i;
+       unsigned pool_offset;
+       struct ttm_page_pool *pool;
+-      int shrink_pages = sc->nr_to_scan;
++      unsigned long shrink_pages = sc->nr_to_scan;
+       unsigned long freed = 0;
+       if (!mutex_trylock(&lock))
+@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+       pool_offset = ++start_pool % NUM_POOLS;
+       /* select start pool in round robin fashion */
+       for (i = 0; i < NUM_POOLS; ++i) {
+-              unsigned nr_free = shrink_pages;
++              unsigned long nr_free = shrink_pages;
+               if (shrink_pages == 0)
+                       break;
+               pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+@@ -673,7 +673,7 @@ out:
+ }
+ /* Put all pages in pages list to correct pool to wait for reuse */
+-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
++static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
+                         enum ttm_caching_state cstate)
+ {
+       unsigned long irq_flags;
+@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+       struct list_head plist;
+       struct page *p = NULL;
+       gfp_t gfp_flags = GFP_USER;
+-      unsigned count;
++      unsigned long count;
+       int r;
+       /* set zero flag for page allocation if required */
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index bef9f6f..ca48e17 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -56,7 +56,7 @@
+ #define NUM_PAGES_TO_ALLOC            (PAGE_SIZE/sizeof(struct page *))
+ #define SMALL_ALLOCATION              4
+-#define FREE_ALL_PAGES                        (~0U)
++#define FREE_ALL_PAGES                        (~0UL)
+ /* times are in msecs */
+ #define IS_UNDEFINED                  (0)
+ #define IS_WC                         (1<<1)
+@@ -416,7 +416,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+  * @nr_free: If set to true will free all pages in pool
+  * @use_static: Safe to use static buffer
+  **/
+-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
++static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
+                                      bool use_static)
+ {
+       static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+@@ -424,8 +424,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+       struct dma_page *dma_p, *tmp;
+       struct page **pages_to_free;
+       struct list_head d_pages;
+-      unsigned freed_pages = 0,
+-               npages_to_free = nr_free;
++      unsigned long freed_pages = 0, npages_to_free = nr_free;
+       if (NUM_PAGES_TO_ALLOC < nr_free)
+               npages_to_free = NUM_PAGES_TO_ALLOC;
+@@ -502,7 +501,8 @@ restart:
+       /* remove range of pages from the pool */
+       if (freed_pages) {
+               ttm_pool_update_free_locked(pool, freed_pages);
+-              nr_free -= freed_pages;
++              if (likely(nr_free != FREE_ALL_PAGES))
++                      nr_free -= freed_pages;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+@@ -939,7 +939,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+       struct dma_page *d_page, *next;
+       enum pool_type type;
+       bool is_cached = false;
+-      unsigned count = 0, i, npages = 0;
++      unsigned long count = 0, i, npages = 0;
+       unsigned long irq_flags;
+       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+@@ -1014,7 +1014,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+       static unsigned start_pool;
+       unsigned idx = 0;
+       unsigned pool_offset;
+-      unsigned shrink_pages = sc->nr_to_scan;
++      unsigned long shrink_pages = sc->nr_to_scan;
+       struct device_pools *p;
+       unsigned long freed = 0;
+@@ -1027,7 +1027,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+               goto out;
+       pool_offset = ++start_pool % _manager->npools;
+       list_for_each_entry(p, &_manager->pools, pools) {
+-              unsigned nr_free;
++              unsigned long nr_free;
+               if (!p->dev)
+                       continue;
+@@ -1041,7 +1041,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
+               freed += nr_free - shrink_pages;
+-              pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
++              pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
+                        p->pool->dev_name, p->pool->name, current->pid,
+                        nr_free, shrink_pages);
+       }
+diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
+index 4709b54..beb015d 100644
+--- a/drivers/gpu/drm/udl/udl_connector.c
++++ b/drivers/gpu/drm/udl/udl_connector.c
+@@ -80,7 +80,7 @@ static int udl_get_modes(struct drm_connector *connector)
+       return ret;
+ }
+-static int udl_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
+                         struct drm_display_mode *mode)
+ {
+       struct udl_device *udl = connector->dev->dev_private;
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 611b6b9..e0faec1 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -242,7 +242,6 @@ static int udl_fb_release(struct fb_info *info, int user)
+               fb_deferred_io_cleanup(info);
+               kfree(info->fbdefio);
+               info->fbdefio = NULL;
+-              info->fbops->fb_mmap = udl_fb_mmap;
+       }
+ #endif
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index 9ecef93..d388af0 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -179,6 +179,11 @@ static int compare_dev(struct device *dev, void *data)
+       return dev == data;
+ }
++static int vc4_match(struct device *dev, void *drv)
++{
++      return platform_bus_type.match(dev, drv);
++}
++
+ static void vc4_match_add_drivers(struct device *dev,
+                                 struct component_match **match,
+                                 struct platform_driver *const *drivers,
+@@ -190,8 +195,7 @@ static void vc4_match_add_drivers(struct device *dev,
+               struct device_driver *drv = &drivers[i]->driver;
+               struct device *p = NULL, *d;
+-              while ((d = bus_find_device(&platform_bus_type, p, drv,
+-                                          (void *)platform_bus_type.match))) {
++              while ((d = bus_find_device(&platform_bus_type, p, drv, vc4_match))) {
+                       put_device(p);
+                       component_match_add(dev, match, compare_dev, d);
+                       p = d;
+diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
+index d17d8f2..67e8e48b 100644
+--- a/drivers/gpu/drm/via/via_dma.c
++++ b/drivers/gpu/drm/via/via_dma.c
+@@ -737,4 +737,4 @@ const struct drm_ioctl_desc via_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+ };
+-int via_max_ioctl = ARRAY_SIZE(via_ioctls);
++const int via_max_ioctl = ARRAY_SIZE(via_ioctls);
+diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
+index ed8aa8f..114cc8d 100644
+--- a/drivers/gpu/drm/via/via_drv.c
++++ b/drivers/gpu/drm/via/via_drv.c
+@@ -107,7 +107,10 @@ static struct pci_driver via_pci_driver = {
+ static int __init via_init(void)
+ {
+-      driver.num_ioctls = via_max_ioctl;
++      pax_open_kernel();
++      const_cast(driver.num_ioctls) = via_max_ioctl;
++      pax_close_kernel();
++
+       via_init_command_verifier();
+       return drm_pci_init(&driver, &via_pci_driver);
+ }
+diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
+index 286a785..c0182616 100644
+--- a/drivers/gpu/drm/via/via_drv.h
++++ b/drivers/gpu/drm/via/via_drv.h
+@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
+ typedef uint32_t maskarray_t[5];
+ typedef struct drm_via_irq {
+-      atomic_t irq_received;
++      atomic_unchecked_t irq_received;
+       uint32_t pending_mask;
+       uint32_t enable_mask;
+       wait_queue_head_t irq_queue;
+@@ -77,7 +77,7 @@ typedef struct drm_via_private {
+       struct timeval last_vblank;
+       int last_vblank_valid;
+       unsigned usec_per_vblank;
+-      atomic_t vbl_received;
++      atomic_unchecked_t vbl_received;
+       drm_via_state_t hc_state;
+       char pci_buf[VIA_PCI_BUF_SIZE];
+       const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+@@ -121,7 +121,7 @@ enum via_family {
+ #define VIA_WRITE8(reg, val)  DRM_WRITE8(VIA_BASE, reg, val)
+ extern const struct drm_ioctl_desc via_ioctls[];
+-extern int via_max_ioctl;
++extern const int via_max_ioctl;
+ extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+ extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
+index ea8172c..6ceff63 100644
+--- a/drivers/gpu/drm/via/via_irq.c
++++ b/drivers/gpu/drm/via/via_irq.c
+@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+       if (pipe != 0)
+               return 0;
+-      return atomic_read(&dev_priv->vbl_received);
++      return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+ irqreturn_t via_driver_irq_handler(int irq, void *arg)
+@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
+       status = VIA_READ(VIA_REG_INTERRUPT);
+       if (status & VIA_IRQ_VBLANK_PENDING) {
+-              atomic_inc(&dev_priv->vbl_received);
+-              if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++              atomic_inc_unchecked(&dev_priv->vbl_received);
++              if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
+                       do_gettimeofday(&cur_vblank);
+                       if (dev_priv->last_vblank_valid) {
+                               dev_priv->usec_per_vblank =
+@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
+                       dev_priv->last_vblank = cur_vblank;
+                       dev_priv->last_vblank_valid = 1;
+               }
+-              if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++              if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
+                       DRM_DEBUG("US per vblank is: %u\n",
+                                 dev_priv->usec_per_vblank);
+               }
+@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
+       for (i = 0; i < dev_priv->num_irqs; ++i) {
+               if (status & cur_irq->pending_mask) {
+-                      atomic_inc(&cur_irq->irq_received);
++                      atomic_inc_unchecked(&cur_irq->irq_received);
+                       wake_up(&cur_irq->irq_queue);
+                       handled = 1;
+                       if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
+               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+                           ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+                            masks[irq][4]));
+-              cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++              cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
+       } else {
+               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+                           (((cur_irq_sequence =
+-                             atomic_read(&cur_irq->irq_received)) -
++                             atomic_read_unchecked(&cur_irq->irq_received)) -
+                             *sequence) <= (1 << 23)));
+       }
+       *sequence = cur_irq_sequence;
+@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
+               }
+               for (i = 0; i < dev_priv->num_irqs; ++i) {
+-                      atomic_set(&cur_irq->irq_received, 0);
++                      atomic_set_unchecked(&cur_irq->irq_received, 0);
+                       cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+                       cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+                       init_waitqueue_head(&cur_irq->irq_queue);
+@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+       switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+       case VIA_IRQ_RELATIVE:
+               irqwait->request.sequence +=
+-                      atomic_read(&cur_irq->irq_received);
++                      atomic_read_unchecked(&cur_irq->irq_received);
+               irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+       case VIA_IRQ_ABSOLUTE:
+               break;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
+index 4e192aa..15665db 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_display.c
++++ b/drivers/gpu/drm/virtio/virtgpu_display.c
+@@ -192,7 +192,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
+       return count;
+ }
+-static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
+                                     struct drm_display_mode *mode)
+ {
+       struct virtio_gpu_output *output =
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
+index 80482ac..bf693e5 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
+@@ -198,11 +198,11 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ }
+ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
+-      ttm_bo_man_init,
+-      ttm_bo_man_takedown,
+-      ttm_bo_man_get_node,
+-      ttm_bo_man_put_node,
+-      ttm_bo_man_debug
++      .init = &ttm_bo_man_init,
++      .takedown = &ttm_bo_man_takedown,
++      .get_node = &ttm_bo_man_get_node,
++      .put_node = &ttm_bo_man_put_node,
++      .debug = &ttm_bo_man_debug
+ };
+ static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 74304b0..d453794 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -439,7 +439,7 @@ struct vmw_private {
+        * Fencing and IRQs.
+        */
+-      atomic_t marker_seq;
++      atomic_unchecked_t marker_seq;
+       wait_queue_head_t fence_queue;
+       wait_queue_head_t fifo_queue;
+       spinlock_t waiter_lock;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index b6a0806..9fb5479 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -156,7 +156,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+                (unsigned int) min,
+                (unsigned int) fifo->capabilities);
+-      atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
++      atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+       vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+       vmw_marker_queue_init(&fifo->marker_queue);
+@@ -355,7 +355,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+                               if (reserveable)
+                                       vmw_mmio_write(bytes, fifo_mem +
+                                                      SVGA_FIFO_RESERVED);
+-                              return (void __force *) (fifo_mem +
++                              return (void __force_kernel *) (fifo_mem +
+                                                        (next_cmd >> 2));
+                       } else {
+                               need_bounce = true;
+@@ -544,7 +544,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+       fm = vmw_fifo_reserve(dev_priv, bytes);
+       if (unlikely(fm == NULL)) {
+-              *seqno = atomic_read(&dev_priv->marker_seq);
++              *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+               ret = -ENOMEM;
+               (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+                                       false, 3*HZ);
+@@ -552,7 +552,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+       }
+       do {
+-              *seqno = atomic_add_return(1, &dev_priv->marker_seq);
++              *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
+       } while (*seqno == 0);
+       if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+index 170b61b..fec7348 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+ }
+ const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+-      vmw_gmrid_man_init,
+-      vmw_gmrid_man_takedown,
+-      vmw_gmrid_man_get_node,
+-      vmw_gmrid_man_put_node,
+-      vmw_gmrid_man_debug
++      .init = vmw_gmrid_man_init,
++      .takedown = vmw_gmrid_man_takedown,
++      .get_node = vmw_gmrid_man_get_node,
++      .put_node = vmw_gmrid_man_put_node,
++      .debug = vmw_gmrid_man_debug
+ };
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index 0c7e172..ead94fc 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -103,7 +103,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
+        * emitted. Then the fence is stale and signaled.
+        */
+-      ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
++      ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
+              > VMW_FENCE_WRAP);
+       return ret;
+@@ -142,7 +142,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+               }
+       }
+-      signal_seq = atomic_read(&dev_priv->marker_seq);
++      signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
+       ret = 0;
+       for (;;) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+index efd1ffd..0ae13ca 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
+       while (!vmw_lag_lt(queue, us)) {
+               spin_lock(&queue->lock);
+               if (list_empty(&queue->head))
+-                      seqno = atomic_read(&dev_priv->marker_seq);
++                      seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+               else {
+                       marker = list_first_entry(&queue->head,
+                                                struct vmw_marker, head);
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index 5f962bf..b095fc5 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -1054,7 +1054,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
+  * where the power switch is separate to the device being powered down.
+  */
+ int vga_switcheroo_init_domain_pm_ops(struct device *dev,
+-                                    struct dev_pm_domain *domain)
++                                    dev_pm_domain_no_const *domain)
+ {
+       /* copy over all the bus versions */
+       if (dev->bus && dev->bus->pm) {
+@@ -1125,7 +1125,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+  */
+ int
+ vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
+-                                               struct dev_pm_domain *domain)
++                                               dev_pm_domain_no_const *domain)
+ {
+       /* copy over all the bus versions */
+       if (dev->bus && dev->bus->pm) {
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 08f53c7..8f2d6a3 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2637,7 +2637,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+ int hid_add_device(struct hid_device *hdev)
+ {
+-      static atomic_t id = ATOMIC_INIT(0);
++      static atomic_unchecked_t id = ATOMIC_INIT(0);
+       int ret;
+       if (WARN_ON(hdev->status & HID_STAT_ADDED))
+@@ -2681,7 +2681,7 @@ int hid_add_device(struct hid_device *hdev)
+       /* XXX hack, any other cleaner solution after the driver core
+        * is converted to allow more than 20 bytes as the device name? */
+       dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
+-                   hdev->vendor, hdev->product, atomic_inc_return(&id));
++                   hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
+       hid_debug_register(hdev, dev_name(&hdev->dev));
+       ret = device_add(&hdev->dev);
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index d6fa496..dde31aa 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -34,7 +34,7 @@ module_param(emulate_scroll_wheel, bool, 0644);
+ MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel");
+ static unsigned int scroll_speed = 32;
+-static int param_set_scroll_speed(const char *val, struct kernel_param *kp) {
++static int param_set_scroll_speed(const char *val, const struct kernel_param *kp) {
+       unsigned long speed;
+       if (!val || kstrtoul(val, 0, &speed) || speed > 63)
+               return -EINVAL;
+diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
+index 5614fee..8a6f5f6 100644
+--- a/drivers/hid/hid-sensor-custom.c
++++ b/drivers/hid/hid-sensor-custom.c
+@@ -590,7 +590,7 @@ static int hid_sensor_custom_add_attributes(struct hid_sensor_custom
+               j = 0;
+               while (j < HID_CUSTOM_TOTAL_ATTRS &&
+                      hid_custom_attrs[j].name) {
+-                      struct device_attribute *device_attr;
++                      device_attribute_no_const *device_attr;
+                       device_attr = &sensor_inst->fields[i].sd_attrs[j];
+diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
+index c13fb5b..55a3802 100644
+--- a/drivers/hid/hid-wiimote-debug.c
++++ b/drivers/hid/hid-wiimote-debug.c
+@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
+       else if (size == 0)
+               return -EIO;
+-      if (copy_to_user(u, buf, size))
++      if (size > sizeof(buf) || copy_to_user(u, buf, size))
+               return -EFAULT;
+       *off += size;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 56dd261..493d7e0 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -398,7 +398,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+       int ret = 0;
+       next_gpadl_handle =
+-              (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
++              (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
+       ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+       if (ret)
+@@ -749,9 +749,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+        * Adjust the size down since vmbus_channel_packet_page_buffer is the
+        * largest size we support
+        */
+-      descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
+-                        ((MAX_PAGE_BUFFER_COUNT - pagecount) *
+-                        sizeof(struct hv_page_buffer));
++      descsize = offsetof(struct vmbus_channel_packet_page_buffer, range[pagecount]);
+       packetlen = descsize + bufferlen;
+       packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index a1c086b..b45a999 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -183,6 +183,7 @@ static struct clocksource hyperv_cs_tsc = {
+ };
+ #endif
++static char hv_hypercall_page[PAGE_SIZE] __aligned(PAGE_SIZE) __used __section(".text");
+ /*
+  * hv_init - Main initialization routine.
+@@ -193,7 +194,6 @@ int hv_init(void)
+ {
+       int max_leaf;
+       union hv_x64_msr_hypercall_contents hypercall_msr;
+-      void *virtaddr = NULL;
+       memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
+       memset(hv_context.synic_message_page, 0,
+@@ -220,14 +220,9 @@ int hv_init(void)
+       /* See if the hypercall page is already set */
+       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+-      virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
+-
+-      if (!virtaddr)
+-              goto cleanup;
+-
+       hypercall_msr.enable = 1;
+-      hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
++      hypercall_msr.guest_physical_address = __phys_to_pfn(__pa(ktla_ktva((unsigned long)hv_hypercall_page)));
+       wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       /* Confirm that hypercall page did get setup. */
+@@ -237,7 +232,7 @@ int hv_init(void)
+       if (!hypercall_msr.enable)
+               goto cleanup;
+-      hv_context.hypercall_page = virtaddr;
++      hv_context.hypercall_page = hv_hypercall_page;
+ #ifdef CONFIG_X86_64
+       if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+@@ -261,13 +256,9 @@ int hv_init(void)
+       return 0;
+ cleanup:
+-      if (virtaddr) {
+-              if (hypercall_msr.enable) {
+-                      hypercall_msr.as_uint64 = 0;
+-                      wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+-              }
+-
+-              vfree(virtaddr);
++      if (hypercall_msr.enable) {
++              hypercall_msr.as_uint64 = 0;
++              wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       }
+       return -ENOTSUPP;
+@@ -288,7 +279,6 @@ void hv_cleanup(void)
+       if (hv_context.hypercall_page) {
+               hypercall_msr.as_uint64 = 0;
+               wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+-              vfree(hv_context.hypercall_page);
+               hv_context.hypercall_page = NULL;
+       }
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index df35fb7..fff7e4e 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -471,7 +471,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+ MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
+-static atomic_t trans_id = ATOMIC_INIT(0);
++static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
+ static int dm_ring_size = (5 * PAGE_SIZE);
+@@ -945,7 +945,7 @@ static void hot_add_req(struct work_struct *dummy)
+               pr_info("Memory hot add failed\n");
+       dm->state = DM_INITIALIZED;
+-      resp.hdr.trans_id = atomic_inc_return(&trans_id);
++      resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+       vmbus_sendpacket(dm->dev->channel, &resp,
+                       sizeof(struct dm_hot_add_response),
+                       (unsigned long)NULL,
+@@ -1026,7 +1026,7 @@ static void post_status(struct hv_dynmem_device *dm)
+       memset(&status, 0, sizeof(struct dm_status));
+       status.hdr.type = DM_STATUS_REPORT;
+       status.hdr.size = sizeof(struct dm_status);
+-      status.hdr.trans_id = atomic_inc_return(&trans_id);
++      status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+       /*
+        * The host expects the guest to report free and committed memory.
+@@ -1050,7 +1050,7 @@ static void post_status(struct hv_dynmem_device *dm)
+        * send the status. This can happen if we were interrupted
+        * after we picked our transaction ID.
+        */
+-      if (status.hdr.trans_id != atomic_read(&trans_id))
++      if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
+               return;
+       /*
+@@ -1195,7 +1195,7 @@ static void balloon_up(struct work_struct *dummy)
+                */
+               do {
+-                      bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
++                      bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+                       ret = vmbus_sendpacket(dm_device.dev->channel,
+                                               bl_resp,
+                                               bl_resp->hdr.size,
+@@ -1241,7 +1241,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
+       memset(&resp, 0, sizeof(struct dm_unballoon_response));
+       resp.hdr.type = DM_UNBALLOON_RESPONSE;
+-      resp.hdr.trans_id = atomic_inc_return(&trans_id);
++      resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+       resp.hdr.size = sizeof(struct dm_unballoon_response);
+       vmbus_sendpacket(dm_device.dev->channel, &resp,
+@@ -1301,7 +1301,7 @@ static void version_resp(struct hv_dynmem_device *dm,
+       memset(&version_req, 0, sizeof(struct dm_version_request));
+       version_req.hdr.type = DM_VERSION_REQUEST;
+       version_req.hdr.size = sizeof(struct dm_version_request);
+-      version_req.hdr.trans_id = atomic_inc_return(&trans_id);
++      version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+       version_req.version.version = dm->next_version;
+       /*
+@@ -1488,7 +1488,7 @@ static int balloon_probe(struct hv_device *dev,
+       memset(&version_req, 0, sizeof(struct dm_version_request));
+       version_req.hdr.type = DM_VERSION_REQUEST;
+       version_req.hdr.size = sizeof(struct dm_version_request);
+-      version_req.hdr.trans_id = atomic_inc_return(&trans_id);
++      version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+       version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
+       version_req.is_last_attempt = 0;
+@@ -1519,7 +1519,7 @@ static int balloon_probe(struct hv_device *dev,
+       memset(&cap_msg, 0, sizeof(struct dm_capabilities));
+       cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
+       cap_msg.hdr.size = sizeof(struct dm_capabilities);
+-      cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
++      cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
+       cap_msg.caps.cap_bits.balloon = 1;
+       cap_msg.caps.cap_bits.hot_add = 1;
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 718b5c7..c1bf203 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -566,7 +566,7 @@ enum vmbus_connect_state {
+ struct vmbus_connection {
+       enum vmbus_connect_state conn_state;
+-      atomic_t next_gpadl_handle;
++      atomic_unchecked_t next_gpadl_handle;
+       struct completion  unload_event;
+       /*
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 579bdf9..0dac21d5 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -116,7 +116,7 @@ struct sensor_template {
+                      struct device_attribute *devattr,
+                      const char *buf, size_t count);
+       int index;
+-};
++} __do_const;
+ /* Averaging interval */
+ static int update_avg_interval(struct acpi_power_meter_resource *resource)
+@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
+                         struct sensor_template *attrs)
+ {
+       struct device *dev = &resource->acpi_dev->dev;
+-      struct sensor_device_attribute *sensors =
++      sensor_device_attribute_no_const *sensors =
+               &resource->sensors[resource->num_sensors];
+       int res = 0;
+@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id __initdata pm_dmi_table[] = {
++static const struct dmi_system_id __initconst pm_dmi_table[] = {
+       {
+               enable_cap_knobs, "IBM Active Energy Manager",
+               {
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 0af7fd3..1fc50d4 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
+ {
+       struct applesmc_node_group *grp;
+       struct applesmc_dev_attr *node;
+-      struct attribute *attr;
++      attribute_no_const *attr;
+       int ret, i;
+       for (grp = groups; grp->format; grp++) {
+@@ -1242,7 +1242,7 @@ static int applesmc_dmi_match(const struct dmi_system_id *id)
+  * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
+  * So we need to put "Apple MacBook Pro" before "Apple MacBook".
+  */
+-static __initdata struct dmi_system_id applesmc_whitelist[] = {
++static const __initconst struct dmi_system_id applesmc_whitelist[] = {
+       { applesmc_dmi_match, "Apple MacBook Air", {
+         DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+         DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index cccef87..06ce8ec 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
+ struct atk_sensor_data {
+       struct list_head list;
+       struct atk_data *data;
+-      struct device_attribute label_attr;
+-      struct device_attribute input_attr;
+-      struct device_attribute limit1_attr;
+-      struct device_attribute limit2_attr;
++      device_attribute_no_const label_attr;
++      device_attribute_no_const input_attr;
++      device_attribute_no_const limit1_attr;
++      device_attribute_no_const limit2_attr;
+       char label_attr_name[ATTR_NAME_SIZE];
+       char input_attr_name[ATTR_NAME_SIZE];
+       char limit1_attr_name[ATTR_NAME_SIZE];
+@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
+ static struct device_attribute atk_name_attr =
+               __ATTR(name, 0444, atk_name_show, NULL);
+-static void atk_init_attribute(struct device_attribute *attr, char *name,
++static void atk_init_attribute(device_attribute_no_const *attr, char *name,
+               sysfs_show_func show)
+ {
+       sysfs_attr_init(&attr->attr);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 6a27eb2..349ed23 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block coretemp_cpu_notifier __refdata = {
++static struct notifier_block coretemp_cpu_notifier = {
+       .notifier_call = coretemp_cpu_callback,
+ };
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index acf9c03..1424826 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -886,7 +886,7 @@ static const struct i8k_config_data i8k_config_data[] = {
+       },
+ };
+-static struct dmi_system_id i8k_dmi_table[] __initdata = {
++static const struct dmi_system_id i8k_dmi_table[] __initconst = {
+       {
+               .ident = "Dell Inspiron",
+               .matches = {
+@@ -1002,7 +1002,7 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+  * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call.
+  * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+  */
+-static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
++static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst = {
+       {
+               .ident = "Dell Studio XPS 8000",
+               .matches = {
+diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
+index 1f64378..2b6e615 100644
+--- a/drivers/hwmon/ibmaem.c
++++ b/drivers/hwmon/ibmaem.c
+@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
+                               const struct aem_rw_sensor_template *rw)
+ {
+       struct device *dev = &data->pdev->dev;
+-      struct sensor_device_attribute *sensors = data->sensors;
++      sensor_device_attribute_no_const *sensors = data->sensors;
+       int err;
+       /* Set up read-only sensors */
+diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
+index 8944987..839863d 100644
+--- a/drivers/hwmon/iio_hwmon.c
++++ b/drivers/hwmon/iio_hwmon.c
+@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+       struct iio_hwmon_state *st;
+-      struct sensor_device_attribute *a;
++      sensor_device_attribute_no_const *a;
+       int ret, i;
+       int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
+       enum iio_chan_type type;
+diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
+index 559c596..3de1a96 100644
+--- a/drivers/hwmon/nct6683.c
++++ b/drivers/hwmon/nct6683.c
+@@ -404,11 +404,11 @@ nct6683_create_attr_group(struct device *dev,
+                         const struct sensor_template_group *tg,
+                         int repeat)
+ {
+-      struct sensor_device_attribute_2 *a2;
+-      struct sensor_device_attribute *a;
++      sensor_device_attribute_2_no_const *a2;
++      sensor_device_attribute_no_const *a;
+       struct sensor_device_template **t;
+       struct sensor_device_attr_u *su;
+-      struct attribute_group *group;
++      attribute_group_no_const *group;
+       struct attribute **attrs;
+       int i, j, count;
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index d087a8e..54e963a 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -1049,10 +1049,10 @@ nct6775_create_attr_group(struct device *dev,
+                         const struct sensor_template_group *tg,
+                         int repeat)
+ {
+-      struct attribute_group *group;
++      attribute_group_no_const *group;
+       struct sensor_device_attr_u *su;
+-      struct sensor_device_attribute *a;
+-      struct sensor_device_attribute_2 *a2;
++      sensor_device_attribute_no_const *a;
++      sensor_device_attribute_2_no_const *a2;
+       struct attribute **attrs;
+       struct sensor_device_template **t;
+       int i, count;
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index ba59eae..dbf694c 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -824,7 +824,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
+       return 0;
+ }
+-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
++static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
+                               const char *name,
+                               umode_t mode,
+                               ssize_t (*show)(struct device *dev,
+@@ -841,7 +841,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+       dev_attr->store = store;
+ }
+-static void pmbus_attr_init(struct sensor_device_attribute *a,
++static void pmbus_attr_init(sensor_device_attribute_no_const *a,
+                           const char *name,
+                           umode_t mode,
+                           ssize_t (*show)(struct device *dev,
+@@ -863,7 +863,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
+                            u16 reg, u8 mask)
+ {
+       struct pmbus_boolean *boolean;
+-      struct sensor_device_attribute *a;
++      sensor_device_attribute_no_const *a;
+       boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
+       if (!boolean)
+@@ -888,7 +888,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
+                                            bool update, bool readonly)
+ {
+       struct pmbus_sensor *sensor;
+-      struct device_attribute *a;
++      device_attribute_no_const *a;
+       sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
+       if (!sensor)
+@@ -919,7 +919,7 @@ static int pmbus_add_label(struct pmbus_data *data,
+                          const char *lstring, int index)
+ {
+       struct pmbus_label *label;
+-      struct device_attribute *a;
++      device_attribute_no_const *a;
+       label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
+       if (!label)
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index a2fdbb7..e749a3c 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -170,7 +170,7 @@ struct sht15_data {
+       int                             supply_uv;
+       bool                            supply_uv_valid;
+       struct work_struct              update_supply_work;
+-      atomic_t                        interrupt_handled;
++      atomic_unchecked_t              interrupt_handled;
+ };
+ /**
+@@ -530,13 +530,13 @@ static int sht15_measurement(struct sht15_data *data,
+       ret = gpio_direction_input(data->pdata->gpio_data);
+       if (ret)
+               return ret;
+-      atomic_set(&data->interrupt_handled, 0);
++      atomic_set_unchecked(&data->interrupt_handled, 0);
+       enable_irq(gpio_to_irq(data->pdata->gpio_data));
+       if (gpio_get_value(data->pdata->gpio_data) == 0) {
+               disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+               /* Only relevant if the interrupt hasn't occurred. */
+-              if (!atomic_read(&data->interrupt_handled))
++              if (!atomic_read_unchecked(&data->interrupt_handled))
+                       schedule_work(&data->read_work);
+       }
+       ret = wait_event_timeout(data->wait_queue,
+@@ -808,7 +808,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
+       /* First disable the interrupt */
+       disable_irq_nosync(irq);
+-      atomic_inc(&data->interrupt_handled);
++      atomic_inc_unchecked(&data->interrupt_handled);
+       /* Then schedule a reading work struct */
+       if (data->state != SHT15_READING_NOTHING)
+               schedule_work(&data->read_work);
+@@ -830,11 +830,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
+                * If not, then start the interrupt again - care here as could
+                * have gone low in meantime so verify it hasn't!
+                */
+-              atomic_set(&data->interrupt_handled, 0);
++              atomic_set_unchecked(&data->interrupt_handled, 0);
+               enable_irq(gpio_to_irq(data->pdata->gpio_data));
+               /* If still not occurred or another handler was scheduled */
+               if (gpio_get_value(data->pdata->gpio_data)
+-                  || atomic_read(&data->interrupt_handled))
++                  || atomic_read_unchecked(&data->interrupt_handled))
+                       return;
+       }
+diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
+index ac91c07..8e69663 100644
+--- a/drivers/hwmon/via-cputemp.c
++++ b/drivers/hwmon/via-cputemp.c
+@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
++static struct notifier_block via_cputemp_cpu_notifier = {
+       .notifier_call = via_cputemp_cpu_callback,
+ };
+diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
+index 65e3240..e6c511d 100644
+--- a/drivers/i2c/busses/i2c-amd756-s4882.c
++++ b/drivers/i2c/busses/i2c-amd756-s4882.c
+@@ -39,7 +39,7 @@
+ extern struct i2c_adapter amd756_smbus;
+ static struct i2c_adapter *s4882_adapter;
+-static struct i2c_algorithm *s4882_algo;
++static i2c_algorithm_no_const *s4882_algo;
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(amd756_lock);
+diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
+index 96f8230..73d7616 100644
+--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
++++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
+@@ -57,7 +57,7 @@ struct dw_scl_sda_cfg {
+ };
+ struct dw_pci_controller {
+-      u32 bus_num;
++      int bus_num;
+       u32 bus_cfg;
+       u32 tx_fifo_depth;
+       u32 rx_fifo_depth;
+diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
+index 88eda09..cf40434 100644
+--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
++++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
+@@ -37,7 +37,7 @@
+ extern struct i2c_adapter *nforce2_smbus;
+ static struct i2c_adapter *s4985_adapter;
+-static struct i2c_algorithm *s4985_algo;
++static i2c_algorithm_no_const *s4985_algo;
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(nforce2_lock);
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 66f323f..af5b573 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -274,7 +274,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+                       break;
+               }
+-              data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
++              data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
+               rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
+               if (IS_ERR(rdwr_pa[i].buf)) {
+                       res = PTR_ERR(rdwr_pa[i].buf);
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index bf9a2ad..a54b1c4 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
+               alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+               if ((unsigned long)buf & alignment
+                   || blk_rq_bytes(rq) & q->dma_pad_mask
+-                  || object_is_on_stack(buf))
++                  || object_starts_on_stack(buf))
+                       drive->dma = 0;
+       }
+ }
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index 83679da..6e67e4f 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -178,7 +178,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+  * 1073741822 == 549756 MB or 48bit addressing fake drive
+  */
+-static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
++static ide_startstop_t __intentional_overflow(-1) ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+                                     sector_t block)
+ {
+       ide_hwif_t *hwif = drive->hwif;
+diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
+index d127ace..6ee866f 100644
+--- a/drivers/ide/ide.c
++++ b/drivers/ide/ide.c
+@@ -244,7 +244,7 @@ struct chs_geom {
+ static unsigned int ide_disks;
+ static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
+-static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
++static int ide_set_disk_chs(const char *str, const struct kernel_param *kp)
+ {
+       unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1;
+@@ -328,7 +328,7 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
+ static unsigned int ide_ignore_cable;
+-static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
++static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp)
+ {
+       int i, j = 1;
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 67ec58f..0a78c78 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1208,36 +1208,46 @@ static void bxt_idle_state_table_update(void)
+       rdmsrl(MSR_PKGC6_IRTL, msr);
+       usec = irtl_2_usec(msr);
+       if (usec) {
+-              bxt_cstates[2].exit_latency = usec;
+-              bxt_cstates[2].target_residency = usec;
++              pax_open_kernel();
++              const_cast(bxt_cstates[2].exit_latency) = usec;
++              const_cast(bxt_cstates[2].target_residency) = usec;
++              pax_close_kernel();
+       }
+       rdmsrl(MSR_PKGC7_IRTL, msr);
+       usec = irtl_2_usec(msr);
+       if (usec) {
+-              bxt_cstates[3].exit_latency = usec;
+-              bxt_cstates[3].target_residency = usec;
++              pax_open_kernel();
++              const_cast(bxt_cstates[3].exit_latency) = usec;
++              const_cast(bxt_cstates[3].target_residency) = usec;
++              pax_close_kernel();
+       }
+       rdmsrl(MSR_PKGC8_IRTL, msr);
+       usec = irtl_2_usec(msr);
+       if (usec) {
+-              bxt_cstates[4].exit_latency = usec;
+-              bxt_cstates[4].target_residency = usec;
++              pax_open_kernel();
++              const_cast(bxt_cstates[4].exit_latency) = usec;
++              const_cast(bxt_cstates[4].target_residency) = usec;
++              pax_close_kernel();
+       }
+       rdmsrl(MSR_PKGC9_IRTL, msr);
+       usec = irtl_2_usec(msr);
+       if (usec) {
+-              bxt_cstates[5].exit_latency = usec;
+-              bxt_cstates[5].target_residency = usec;
++              pax_open_kernel();
++              const_cast(bxt_cstates[5].exit_latency) = usec;
++              const_cast(bxt_cstates[5].target_residency) = usec;
++              pax_close_kernel();
+       }
+       rdmsrl(MSR_PKGC10_IRTL, msr);
+       usec = irtl_2_usec(msr);
+       if (usec) {
+-              bxt_cstates[6].exit_latency = usec;
+-              bxt_cstates[6].target_residency = usec;
++              pax_open_kernel();
++              const_cast(bxt_cstates[6].exit_latency) = usec;
++              const_cast(bxt_cstates[6].target_residency) = usec;
++              pax_close_kernel();
+       }
+ }
+@@ -1280,8 +1290,10 @@ static void sklh_idle_state_table_update(void)
+                       return;
+       }
+-      skl_cstates[5].disabled = 1;    /* C8-SKL */
+-      skl_cstates[6].disabled = 1;    /* C9-SKL */
++      pax_open_kernel();
++      const_cast(skl_cstates[5].disabled) = 1;        /* C8-SKL */
++      const_cast(skl_cstates[6].disabled) = 1;        /* C9-SKL */
++      pax_close_kernel();
+ }
+ /*
+  * intel_idle_state_table_update()
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index d2b8899..5b0e8f5 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -769,7 +769,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
+ }
+ static
+-int __iio_device_attr_init(struct device_attribute *dev_attr,
++int __iio_device_attr_init(device_attribute_no_const *dev_attr,
+                          const char *postfix,
+                          struct iio_chan_spec const *chan,
+                          ssize_t (*readfunc)(struct device *dev,
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index c995255..7de0b49 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
+ struct cm_counter_group {
+       struct kobject obj;
+-      atomic_long_t counter[CM_ATTR_COUNT];
++      atomic_long_unchecked_t counter[CM_ATTR_COUNT];
+ };
+ struct cm_counter_attribute {
+@@ -1432,7 +1432,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
+ static void cm_format_rej(struct cm_rej_msg *rej_msg,
+                         struct cm_id_private *cm_id_priv,
+                         enum ib_cm_rej_reason reason,
+-                        void *ari,
++                        const void *ari,
+                         u8 ari_length,
+                         const void *private_data,
+                         u8 private_data_len)
+@@ -1476,7 +1476,7 @@ static void cm_dup_req_handler(struct cm_work *work,
+       struct ib_mad_send_buf *msg = NULL;
+       int ret;
+-      atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++      atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                       counter[CM_REQ_COUNTER]);
+       /* Quick state check to discard duplicate REQs. */
+@@ -1884,7 +1884,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
+       if (!cm_id_priv)
+               return;
+-      atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++      atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                       counter[CM_REP_COUNTER]);
+       ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+       if (ret)
+@@ -2051,7 +2051,7 @@ static int cm_rtu_handler(struct cm_work *work)
+       if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+           cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+               spin_unlock_irq(&cm_id_priv->lock);
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_RTU_COUNTER]);
+               goto out;
+       }
+@@ -2234,7 +2234,7 @@ static int cm_dreq_handler(struct cm_work *work)
+       cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
+                                  dreq_msg->local_comm_id);
+       if (!cm_id_priv) {
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_DREQ_COUNTER]);
+               cm_issue_drep(work->port, work->mad_recv_wc);
+               return -EINVAL;
+@@ -2259,7 +2259,7 @@ static int cm_dreq_handler(struct cm_work *work)
+       case IB_CM_MRA_REP_RCVD:
+               break;
+       case IB_CM_TIMEWAIT:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_DREQ_COUNTER]);
+               if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+                       goto unlock;
+@@ -2273,7 +2273,7 @@ static int cm_dreq_handler(struct cm_work *work)
+                       cm_free_msg(msg);
+               goto deref;
+       case IB_CM_DREQ_RCVD:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_DREQ_COUNTER]);
+               goto unlock;
+       default:
+@@ -2336,12 +2336,13 @@ out:
+ }
+ int ib_send_cm_rej(struct ib_cm_id *cm_id,
+-                 enum ib_cm_rej_reason reason,
+-                 void *ari,
++                 int _reason,
++                 const void *ari,
+                  u8 ari_length,
+                  const void *private_data,
+                  u8 private_data_len)
+ {
++      enum ib_cm_rej_reason reason = _reason;
+       struct cm_id_private *cm_id_priv;
+       struct ib_mad_send_buf *msg;
+       unsigned long flags;
+@@ -2640,7 +2641,7 @@ static int cm_mra_handler(struct cm_work *work)
+                   ib_modify_mad(cm_id_priv->av.port->mad_agent,
+                                 cm_id_priv->msg, timeout)) {
+                       if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+-                              atomic_long_inc(&work->port->
++                              atomic_long_inc_unchecked(&work->port->
+                                               counter_group[CM_RECV_DUPLICATES].
+                                               counter[CM_MRA_COUNTER]);
+                       goto out;
+@@ -2649,7 +2650,7 @@ static int cm_mra_handler(struct cm_work *work)
+               break;
+       case IB_CM_MRA_REQ_RCVD:
+       case IB_CM_MRA_REP_RCVD:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_MRA_COUNTER]);
+               /* fall through */
+       default:
+@@ -2811,7 +2812,7 @@ static int cm_lap_handler(struct cm_work *work)
+       case IB_CM_LAP_IDLE:
+               break;
+       case IB_CM_MRA_LAP_SENT:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_LAP_COUNTER]);
+               if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+                       goto unlock;
+@@ -2827,7 +2828,7 @@ static int cm_lap_handler(struct cm_work *work)
+                       cm_free_msg(msg);
+               goto deref;
+       case IB_CM_LAP_RCVD:
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_LAP_COUNTER]);
+               goto unlock;
+       default:
+@@ -2859,7 +2860,7 @@ deref:   cm_deref_id(cm_id_priv);
+ static void cm_format_apr(struct cm_apr_msg *apr_msg,
+                         struct cm_id_private *cm_id_priv,
+                         enum ib_cm_apr_status status,
+-                        void *info,
++                        const void *info,
+                         u8 info_length,
+                         const void *private_data,
+                         u8 private_data_len)
+@@ -2879,12 +2880,13 @@ static void cm_format_apr(struct cm_apr_msg *apr_msg,
+ }
+ int ib_send_cm_apr(struct ib_cm_id *cm_id,
+-                 enum ib_cm_apr_status status,
+-                 void *info,
++                 int _status,
++                 const void *info,
+                  u8 info_length,
+                  const void *private_data,
+                  u8 private_data_len)
+ {
++      enum ib_cm_apr_status status = _status;
+       struct cm_id_private *cm_id_priv;
+       struct ib_mad_send_buf *msg;
+       unsigned long flags;
+@@ -3113,7 +3115,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
+       cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+       if (cur_cm_id_priv) {
+               spin_unlock_irq(&cm.lock);
+-              atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++              atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+                               counter[CM_SIDR_REQ_COUNTER]);
+               goto out; /* Duplicate message. */
+       }
+@@ -3327,10 +3329,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
+       if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+               msg->retries = 1;
+-      atomic_long_add(1 + msg->retries,
++      atomic_long_add_unchecked(1 + msg->retries,
+                       &port->counter_group[CM_XMIT].counter[attr_index]);
+       if (msg->retries)
+-              atomic_long_add(msg->retries,
++              atomic_long_add_unchecked(msg->retries,
+                               &port->counter_group[CM_XMIT_RETRIES].
+                               counter[attr_index]);
+@@ -3557,7 +3559,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+       }
+       attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+-      atomic_long_inc(&port->counter_group[CM_RECV].
++      atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
+                       counter[attr_id - CM_ATTR_ID_OFFSET]);
+       work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+@@ -3764,7 +3766,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
+       cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+       return sprintf(buf, "%ld\n",
+-                     atomic_long_read(&group->counter[cm_attr->index]));
++                     atomic_long_read_unchecked(&group->counter[cm_attr->index]));
+ }
+ static const struct sysfs_ops cm_counter_ops = {
+diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
+index cdbb1f1..7ed4277 100644
+--- a/drivers/infiniband/core/fmr_pool.c
++++ b/drivers/infiniband/core/fmr_pool.c
+@@ -98,8 +98,8 @@ struct ib_fmr_pool {
+       struct task_struct       *thread;
+-      atomic_t                  req_ser;
+-      atomic_t                  flush_ser;
++      atomic_unchecked_t        req_ser;
++      atomic_unchecked_t        flush_ser;
+       wait_queue_head_t         force_wait;
+ };
+@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+       struct ib_fmr_pool *pool = pool_ptr;
+       do {
+-              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++              if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
+                       ib_fmr_batch_release(pool);
+-                      atomic_inc(&pool->flush_ser);
++                      atomic_inc_unchecked(&pool->flush_ser);
+                       wake_up_interruptible(&pool->force_wait);
+                       if (pool->flush_function)
+@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+-              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++              if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
+                   !kthread_should_stop())
+                       schedule();
+               __set_current_state(TASK_RUNNING);
+@@ -262,8 +262,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
+       pool->dirty_watermark = params->dirty_watermark;
+       pool->dirty_len       = 0;
+       spin_lock_init(&pool->pool_lock);
+-      atomic_set(&pool->req_ser,   0);
+-      atomic_set(&pool->flush_ser, 0);
++      atomic_set_unchecked(&pool->req_ser,   0);
++      atomic_set_unchecked(&pool->flush_ser, 0);
+       init_waitqueue_head(&pool->force_wait);
+       pool->thread = kthread_run(ib_fmr_cleanup_thread,
+@@ -388,11 +388,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+       }
+       spin_unlock_irq(&pool->pool_lock);
+-      serial = atomic_inc_return(&pool->req_ser);
++      serial = atomic_inc_return_unchecked(&pool->req_ser);
+       wake_up_process(pool->thread);
+       if (wait_event_interruptible(pool->force_wait,
+-                                   atomic_read(&pool->flush_ser) - serial >= 0))
++                                   atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
+               return -EINTR;
+       return 0;
+@@ -502,7 +502,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+               } else {
+                       list_add_tail(&fmr->list, &pool->dirty_list);
+                       if (++pool->dirty_len >= pool->dirty_watermark) {
+-                              atomic_inc(&pool->req_ser);
++                              atomic_inc_unchecked(&pool->req_ser);
+                               wake_up_process(pool->thread);
+                       }
+               }
+diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+index 10469b0..e8b45f3 100644
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -176,11 +176,10 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+                       }
+                       {
+-                              struct netlink_dump_control c = {
++                              netlink_dump_control_no_const c = {
+                                       .dump = client->cb_table[op].dump,
+-                                      .module = client->cb_table[op].module,
+                               };
+-                              return netlink_dump_start(nls, skb, nlh, &c);
++                              return __netlink_dump_start(nls, skb, nlh, &c, NULL, client->cb_table[op].module);
+                       }
+               }
+       }
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index 15defef..9cd7c28 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -894,7 +894,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
+                          u8 port_num)
+ {
+-      struct attribute_group *hsag;
++      attribute_group_no_const *hsag;
+       struct rdma_hw_stats *stats;
+       int i, ret;
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index 7713ef0..0bb2981 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -920,14 +920,14 @@ static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
+                              const char __user *inbuf,
+                              int in_len, int out_len)
+ {
+-      return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
++      return ib_ucm_send_info(file, inbuf, in_len, ib_send_cm_rej);
+ }
+ static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
+                              const char __user *inbuf,
+                              int in_len, int out_len)
+ {
+-      return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
++      return ib_ucm_send_info(file, inbuf, in_len, ib_send_cm_apr);
+ }
+ static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index f664731..b46744f 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -974,6 +974,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
+       if (copy_from_user(&cmd, buf, sizeof cmd))
+               return -EFAULT;
++      if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
++              return -EFAULT;
++
+       INIT_UDATA(&udata, buf + sizeof cmd,
+                  (unsigned long) cmd.response + sizeof resp,
+                  in_len - sizeof cmd, out_len - sizeof resp);
+diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
+index 3c4b212..bf4f82b 100644
+--- a/drivers/infiniband/hw/cxgb4/device.c
++++ b/drivers/infiniband/hw/cxgb4/device.c
+@@ -111,7 +111,7 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+       if (!wq->rdev->wr_log)
+               return;
+-      idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
++      idx = (atomic_inc_return_unchecked(&wq->rdev->wr_log_idx) - 1) &
+               (wq->rdev->wr_log_size - 1);
+       le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+       getnstimeofday(&le.poll_host_ts);
+@@ -143,7 +143,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
+ #define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
+-      idx = atomic_read(&dev->rdev.wr_log_idx) &
++      idx = atomic_read_unchecked(&dev->rdev.wr_log_idx) &
+               (dev->rdev.wr_log_size - 1);
+       end = idx - 1;
+       if (end < 0)
+@@ -840,7 +840,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+                                      sizeof(*rdev->wr_log), GFP_KERNEL);
+               if (rdev->wr_log) {
+                       rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+-                      atomic_set(&rdev->wr_log_idx, 0);
++                      atomic_set_unchecked(&rdev->wr_log_idx, 0);
+               } else {
+                       pr_err(MOD "error allocating wr_log. Logging disabled\n");
+               }
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index 4b83b84..7d402e0 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -180,7 +180,7 @@ struct c4iw_rdev {
+       struct c4iw_stats stats;
+       struct c4iw_hw_queue hw_queue;
+       struct t4_dev_status_page *status_page;
+-      atomic_t wr_log_idx;
++      atomic_unchecked_t wr_log_idx;
+       struct wr_log_entry *wr_log;
+       int wr_log_size;
+ };
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 0b91b0f..866b3b9 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+       int err;
+       struct fw_ri_tpte tpt;
+       u32 stag_idx;
+-      static atomic_t key;
++      static atomic_unchecked_t key;
+       if (c4iw_fatal_error(rdev))
+               return -EIO;
+@@ -287,7 +287,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+               if (rdev->stats.stag.cur > rdev->stats.stag.max)
+                       rdev->stats.stag.max = rdev->stats.stag.cur;
+               mutex_unlock(&rdev->stats.lock);
+-              *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
++              *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
+       }
+       PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+            __func__, stag_state, type, pdid, stag_idx);
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 89c68da..addb2ad 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -537,7 +537,7 @@ static void tune_pcie_caps(struct hfi1_devdata *dd)
+  * PCI error infrastructure, registered via pci
+  */
+ static pci_ers_result_t
+-pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+       pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+index 2c4b4d0..b45e806 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+@@ -4604,46 +4604,46 @@ static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+ }
+ static struct i40iw_cqp_ops iw_cqp_ops = {
+-      i40iw_sc_cqp_init,
+-      i40iw_sc_cqp_create,
+-      i40iw_sc_cqp_post_sq,
+-      i40iw_sc_cqp_get_next_send_wqe,
+-      i40iw_sc_cqp_destroy,
+-      i40iw_sc_poll_for_cqp_op_done
++      .cqp_init = i40iw_sc_cqp_init,
++      .cqp_create = i40iw_sc_cqp_create,
++      .cqp_post_sq = i40iw_sc_cqp_post_sq,
++      .cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
++      .cqp_destroy = i40iw_sc_cqp_destroy,
++      .poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
+ };
+ static struct i40iw_ccq_ops iw_ccq_ops = {
+-      i40iw_sc_ccq_init,
+-      i40iw_sc_ccq_create,
+-      i40iw_sc_ccq_destroy,
+-      i40iw_sc_ccq_create_done,
+-      i40iw_sc_ccq_get_cqe_info,
+-      i40iw_sc_ccq_arm
++      .ccq_init = i40iw_sc_ccq_init,
++      .ccq_create = i40iw_sc_ccq_create,
++      .ccq_destroy = i40iw_sc_ccq_destroy,
++      .ccq_create_done = i40iw_sc_ccq_create_done,
++      .ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
++      .ccq_arm = i40iw_sc_ccq_arm
+ };
+ static struct i40iw_ceq_ops iw_ceq_ops = {
+-      i40iw_sc_ceq_init,
+-      i40iw_sc_ceq_create,
+-      i40iw_sc_cceq_create_done,
+-      i40iw_sc_cceq_destroy_done,
+-      i40iw_sc_cceq_create,
+-      i40iw_sc_ceq_destroy,
+-      i40iw_sc_process_ceq
++      .ceq_init = i40iw_sc_ceq_init,
++      .ceq_create = i40iw_sc_ceq_create,
++      .cceq_create_done = i40iw_sc_cceq_create_done,
++      .cceq_destroy_done = i40iw_sc_cceq_destroy_done,
++      .cceq_create = i40iw_sc_cceq_create,
++      .ceq_destroy = i40iw_sc_ceq_destroy,
++      .process_ceq = i40iw_sc_process_ceq
+ };
+ static struct i40iw_aeq_ops iw_aeq_ops = {
+-      i40iw_sc_aeq_init,
+-      i40iw_sc_aeq_create,
+-      i40iw_sc_aeq_destroy,
+-      i40iw_sc_get_next_aeqe,
+-      i40iw_sc_repost_aeq_entries,
+-      i40iw_sc_aeq_create_done,
+-      i40iw_sc_aeq_destroy_done
++      .aeq_init = i40iw_sc_aeq_init,
++      .aeq_create = i40iw_sc_aeq_create,
++      .aeq_destroy = i40iw_sc_aeq_destroy,
++      .get_next_aeqe = i40iw_sc_get_next_aeqe,
++      .repost_aeq_entries = i40iw_sc_repost_aeq_entries,
++      .aeq_create_done = i40iw_sc_aeq_create_done,
++      .aeq_destroy_done = i40iw_sc_aeq_destroy_done
+ };
+ /* iwarp pd ops */
+ static struct i40iw_pd_ops iw_pd_ops = {
+-      i40iw_sc_pd_init,
++      .pd_init = i40iw_sc_pd_init,
+ };
+ static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+@@ -4662,61 +4662,59 @@ static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+ };
+ static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
+-      i40iw_sc_cq_init,
+-      i40iw_sc_cq_create,
+-      i40iw_sc_cq_destroy,
+-      i40iw_sc_cq_modify,
++      .cq_init = i40iw_sc_cq_init,
++      .cq_create = i40iw_sc_cq_create,
++      .cq_destroy = i40iw_sc_cq_destroy,
++      .cq_modify = i40iw_sc_cq_modify,
+ };
+ static struct i40iw_mr_ops iw_mr_ops = {
+-      i40iw_sc_alloc_stag,
+-      i40iw_sc_mr_reg_non_shared,
+-      i40iw_sc_mr_reg_shared,
+-      i40iw_sc_dealloc_stag,
+-      i40iw_sc_query_stag,
+-      i40iw_sc_mw_alloc
++      .alloc_stag = i40iw_sc_alloc_stag,
++      .mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
++      .mr_reg_shared = i40iw_sc_mr_reg_shared,
++      .dealloc_stag = i40iw_sc_dealloc_stag,
++      .query_stag = i40iw_sc_query_stag,
++      .mw_alloc = i40iw_sc_mw_alloc
+ };
+ static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
+-      i40iw_sc_manage_push_page,
+-      i40iw_sc_manage_hmc_pm_func_table,
+-      i40iw_sc_set_hmc_resource_profile,
+-      i40iw_sc_commit_fpm_values,
+-      i40iw_sc_query_fpm_values,
+-      i40iw_sc_static_hmc_pages_allocated,
+-      i40iw_sc_add_arp_cache_entry,
+-      i40iw_sc_del_arp_cache_entry,
+-      i40iw_sc_query_arp_cache_entry,
+-      i40iw_sc_manage_apbvt_entry,
+-      i40iw_sc_manage_qhash_table_entry,
+-      i40iw_sc_alloc_local_mac_ipaddr_entry,
+-      i40iw_sc_add_local_mac_ipaddr_entry,
+-      i40iw_sc_del_local_mac_ipaddr_entry,
+-      i40iw_sc_cqp_nop,
+-      i40iw_sc_commit_fpm_values_done,
+-      i40iw_sc_query_fpm_values_done,
+-      i40iw_sc_manage_hmc_pm_func_table_done,
+-      i40iw_sc_suspend_qp,
+-      i40iw_sc_resume_qp
++      .manage_push_page = i40iw_sc_manage_push_page,
++      .manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
++      .set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
++      .commit_fpm_values = i40iw_sc_commit_fpm_values,
++      .query_fpm_values = i40iw_sc_query_fpm_values,
++      .static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
++      .add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
++      .del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
++      .query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
++      .manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
++      .manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
++      .alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
++      .add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
++      .del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
++      .cqp_nop = i40iw_sc_cqp_nop,
++      .commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
++      .query_fpm_values_done = i40iw_sc_query_fpm_values_done,
++      .manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
++      .update_suspend_qp = i40iw_sc_suspend_qp,
++      .update_resume_qp = i40iw_sc_resume_qp
+ };
+ static struct i40iw_hmc_ops iw_hmc_ops = {
+-      i40iw_sc_init_iw_hmc,
+-      i40iw_sc_parse_fpm_query_buf,
+-      i40iw_sc_configure_iw_fpm,
+-      i40iw_sc_parse_fpm_commit_buf,
+-      i40iw_sc_create_hmc_obj,
+-      i40iw_sc_del_hmc_obj,
+-      NULL,
+-      NULL
++      .init_iw_hmc = i40iw_sc_init_iw_hmc,
++      .parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
++      .configure_iw_fpm = i40iw_sc_configure_iw_fpm,
++      .parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
++      .create_hmc_object = i40iw_sc_create_hmc_obj,
++      .del_hmc_object = i40iw_sc_del_hmc_obj
+ };
+ static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
+-      i40iw_hw_stat_init,
+-      i40iw_hw_stat_read_32,
+-      i40iw_hw_stat_read_64,
+-      i40iw_hw_stat_read_all,
+-      i40iw_hw_stat_refresh_all
++      .iw_hw_stat_init = i40iw_hw_stat_init,
++      .iw_hw_stat_read_32 = i40iw_hw_stat_read_32,
++      .iw_hw_stat_read_64 = i40iw_hw_stat_read_64,
++      .iw_hw_stat_read_all = i40iw_hw_stat_read_all,
++      .iw_hw_stat_refresh_all = i40iw_hw_stat_refresh_all
+ };
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+index 4d28c3c..ec6b0b7 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
+@@ -919,29 +919,29 @@ enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data
+ }
+ static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+-      i40iw_qp_post_wr,
+-      i40iw_qp_ring_push_db,
+-      i40iw_rdma_write,
+-      i40iw_rdma_read,
+-      i40iw_send,
+-      i40iw_inline_rdma_write,
+-      i40iw_inline_send,
+-      i40iw_stag_local_invalidate,
+-      i40iw_mw_bind,
+-      i40iw_post_receive,
+-      i40iw_nop
++      .iw_qp_post_wr = i40iw_qp_post_wr,
++      .iw_qp_ring_push_db = i40iw_qp_ring_push_db,
++      .iw_rdma_write = i40iw_rdma_write,
++      .iw_rdma_read = i40iw_rdma_read,
++      .iw_send = i40iw_send,
++      .iw_inline_rdma_write = i40iw_inline_rdma_write,
++      .iw_inline_send = i40iw_inline_send,
++      .iw_stag_local_invalidate = i40iw_stag_local_invalidate,
++      .iw_mw_bind = i40iw_mw_bind,
++      .iw_post_receive = i40iw_post_receive,
++      .iw_post_nop = i40iw_nop
+ };
+ static struct i40iw_cq_ops iw_cq_ops = {
+-      i40iw_cq_request_notification,
+-      i40iw_cq_poll_completion,
+-      i40iw_cq_post_entries,
+-      i40iw_clean_cq
++      .iw_cq_request_notification = i40iw_cq_request_notification,
++      .iw_cq_poll_completion = i40iw_cq_poll_completion,
++      .iw_cq_post_entries = i40iw_cq_post_entries,
++      .iw_cq_clean = i40iw_clean_cq
+ };
+ static struct i40iw_device_uk_ops iw_device_uk_ops = {
+-      i40iw_cq_uk_init,
+-      i40iw_qp_uk_init,
++      .iwarp_cq_uk_init = i40iw_cq_uk_init,
++      .iwarp_qp_uk_init = i40iw_qp_uk_init,
+ };
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
+index 276bcef..b2e3684 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
+@@ -343,7 +343,7 @@ struct i40iw_device_uk_ops {
+ struct i40iw_dev_uk {
+       struct i40iw_device_uk_ops ops_uk;
+-};
++} __no_const;
+ struct i40iw_sq_uk_wr_trk_info {
+       u64 wrid;
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 0f21c3a..257e0a7 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -99,7 +99,7 @@ __be64 mlx4_ib_gen_node_guid(void)
+ __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
+ {
+-      return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
++      return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
+               cpu_to_be64(0xff00000000000000LL);
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
+index 097bfcc..06fe83a 100644
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -1043,7 +1043,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
+ {
+       char name[20];
+-      atomic_set(&ctx->tid, 0);
++      atomic_set_unchecked(&ctx->tid, 0);
+       sprintf(name, "mlx4_ib_mcg%d", ctx->port);
+       ctx->mcg_wq = create_singlethread_workqueue(name);
+       if (!ctx->mcg_wq)
+diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+index 686ab48..736a1d7 100644
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -457,7 +457,7 @@ struct mlx4_ib_demux_ctx {
+       struct list_head        mcg_mgid0_list;
+       struct workqueue_struct *mcg_wq;
+       struct mlx4_ib_demux_pv_ctx **tun;
+-      atomic_t tid;
++      atomic_unchecked_t tid;
+       int    flushing; /* flushing the work queue */
+ };
+diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
+index c7f49bb..6a021bb 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
+@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
+       mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
+ }
+-int mthca_QUERY_FW(struct mthca_dev *dev)
++int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
+ {
+       struct mthca_mailbox *mailbox;
+       u32 *outbox;
+@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                            CMD_TIME_CLASS_B);
+ }
+-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
++int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                   int num_mtt)
+ {
+       return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
+@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
+                        0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
+ }
+-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
++int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+                  int eq_num)
+ {
+       return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
+@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
+                        CMD_TIME_CLASS_B);
+ }
+-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
++int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+                 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+                 const void *in_mad, void *response_mad)
+ {
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index ded76c1..0cf0a08 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -692,7 +692,7 @@ err_close:
+       return err;
+ }
+-static int mthca_setup_hca(struct mthca_dev *dev)
++static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
+ {
+       int err;
+diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
+index ed9a989..6aa5dc2 100644
+--- a/drivers/infiniband/hw/mthca/mthca_mr.c
++++ b/drivers/infiniband/hw/mthca/mthca_mr.c
+@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
+  * through the bitmaps)
+  */
+-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
++static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
+ {
+       int o;
+       int m;
+@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
+               return key;
+ }
+-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
++int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+                  u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
+ {
+       struct mthca_mailbox *mailbox;
+@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
+       return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+ }
+-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
++int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+                       u64 *buffer_list, int buffer_size_shift,
+                       int list_len, u64 iova, u64 total_size,
+                       u32 access, struct mthca_mr *mr)
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index da2335f..d6f4677 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -772,7 +772,7 @@ unlock:
+       return 0;
+ }
+-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
++static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+ {
+       struct mthca_dev *dev = to_mdev(ibcq->device);
+       struct mthca_cq *cq = to_mcq(ibcq);
+diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
+index 35cbb17..d336a68 100644
+--- a/drivers/infiniband/hw/nes/nes.c
++++ b/drivers/infiniband/hw/nes/nes.c
+@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+ LIST_HEAD(nes_adapter_list);
+ static LIST_HEAD(nes_dev_list);
+-atomic_t qps_destroyed;
++atomic_unchecked_t qps_destroyed;
+ static unsigned int ee_flsh_adapter;
+ static unsigned int sysfs_nonidx_addr;
+@@ -268,7 +268,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
+       struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
+-      atomic_inc(&qps_destroyed);
++      atomic_inc_unchecked(&qps_destroyed);
+       /* Free the control structures */
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index bd9d132..70d84f4 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
+ extern unsigned int wqm_quanta;
+ extern struct list_head nes_adapter_list;
+-extern atomic_t cm_connects;
+-extern atomic_t cm_accepts;
+-extern atomic_t cm_disconnects;
+-extern atomic_t cm_closes;
+-extern atomic_t cm_connecteds;
+-extern atomic_t cm_connect_reqs;
+-extern atomic_t cm_rejects;
+-extern atomic_t mod_qp_timouts;
+-extern atomic_t qps_created;
+-extern atomic_t qps_destroyed;
+-extern atomic_t sw_qps_destroyed;
++extern atomic_unchecked_t cm_connects;
++extern atomic_unchecked_t cm_accepts;
++extern atomic_unchecked_t cm_disconnects;
++extern atomic_unchecked_t cm_closes;
++extern atomic_unchecked_t cm_connecteds;
++extern atomic_unchecked_t cm_connect_reqs;
++extern atomic_unchecked_t cm_rejects;
++extern atomic_unchecked_t mod_qp_timouts;
++extern atomic_unchecked_t qps_created;
++extern atomic_unchecked_t qps_destroyed;
++extern atomic_unchecked_t sw_qps_destroyed;
+ extern u32 mh_detected;
+ extern u32 mh_pauses_sent;
+ extern u32 cm_packets_sent;
+@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
+ extern u32 cm_packets_received;
+ extern u32 cm_packets_dropped;
+ extern u32 cm_packets_retrans;
+-extern atomic_t cm_listens_created;
+-extern atomic_t cm_listens_destroyed;
++extern atomic_unchecked_t cm_listens_created;
++extern atomic_unchecked_t cm_listens_destroyed;
+ extern u32 cm_backlog_drops;
+-extern atomic_t cm_loopbacks;
+-extern atomic_t cm_nodes_created;
+-extern atomic_t cm_nodes_destroyed;
+-extern atomic_t cm_accel_dropped_pkts;
+-extern atomic_t cm_resets_recvd;
+-extern atomic_t pau_qps_created;
+-extern atomic_t pau_qps_destroyed;
++extern atomic_unchecked_t cm_loopbacks;
++extern atomic_unchecked_t cm_nodes_created;
++extern atomic_unchecked_t cm_nodes_destroyed;
++extern atomic_unchecked_t cm_accel_dropped_pkts;
++extern atomic_unchecked_t cm_resets_recvd;
++extern atomic_unchecked_t pau_qps_created;
++extern atomic_unchecked_t pau_qps_destroyed;
+ extern u32 int_mod_timer_init;
+ extern u32 int_mod_cq_depth_256;
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 7f0aa23..3c20939 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
+ u32 cm_packets_retrans;
+ u32 cm_packets_created;
+ u32 cm_packets_received;
+-atomic_t cm_listens_created;
+-atomic_t cm_listens_destroyed;
++atomic_unchecked_t cm_listens_created;
++atomic_unchecked_t cm_listens_destroyed;
+ u32 cm_backlog_drops;
+-atomic_t cm_loopbacks;
+-atomic_t cm_nodes_created;
+-atomic_t cm_nodes_destroyed;
+-atomic_t cm_accel_dropped_pkts;
+-atomic_t cm_resets_recvd;
++atomic_unchecked_t cm_loopbacks;
++atomic_unchecked_t cm_nodes_created;
++atomic_unchecked_t cm_nodes_destroyed;
++atomic_unchecked_t cm_accel_dropped_pkts;
++atomic_unchecked_t cm_resets_recvd;
+ static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
+@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
+ /* instance of function pointers for client API */
+ /* set address of this instance to cm_core->cm_ops at cm_core alloc */
+ static const struct nes_cm_ops nes_cm_api = {
+-      mini_cm_accelerated,
+-      mini_cm_listen,
+-      mini_cm_del_listen,
+-      mini_cm_connect,
+-      mini_cm_close,
+-      mini_cm_accept,
+-      mini_cm_reject,
+-      mini_cm_recv_pkt,
+-      mini_cm_dealloc_core,
+-      mini_cm_get,
+-      mini_cm_set
++      .accelerated = mini_cm_accelerated,
++      .listen = mini_cm_listen,
++      .stop_listener = mini_cm_del_listen,
++      .connect = mini_cm_connect,
++      .close = mini_cm_close,
++      .accept = mini_cm_accept,
++      .reject = mini_cm_reject,
++      .recv_pkt = mini_cm_recv_pkt,
++      .destroy_cm_core = mini_cm_dealloc_core,
++      .get = mini_cm_get,
++      .set = mini_cm_set
+ };
+ static struct nes_cm_core *g_cm_core;
+-atomic_t cm_connects;
+-atomic_t cm_accepts;
+-atomic_t cm_disconnects;
+-atomic_t cm_closes;
+-atomic_t cm_connecteds;
+-atomic_t cm_connect_reqs;
+-atomic_t cm_rejects;
++atomic_unchecked_t cm_connects;
++atomic_unchecked_t cm_accepts;
++atomic_unchecked_t cm_disconnects;
++atomic_unchecked_t cm_closes;
++atomic_unchecked_t cm_connecteds;
++atomic_unchecked_t cm_connect_reqs;
++atomic_unchecked_t cm_rejects;
+ int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
+ {
+@@ -1333,7 +1333,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+               kfree(listener);
+               listener = NULL;
+               ret = 0;
+-              atomic_inc(&cm_listens_destroyed);
++              atomic_inc_unchecked(&cm_listens_destroyed);
+       } else {
+               spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+       }
+@@ -1537,7 +1537,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+                 cm_node->rem_mac);
+       add_hte_node(cm_core, cm_node);
+-      atomic_inc(&cm_nodes_created);
++      atomic_inc_unchecked(&cm_nodes_created);
+       return cm_node;
+ }
+@@ -1596,7 +1596,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+       }
+       atomic_dec(&cm_core->node_cnt);
+-      atomic_inc(&cm_nodes_destroyed);
++      atomic_inc_unchecked(&cm_nodes_destroyed);
+       nesqp = cm_node->nesqp;
+       if (nesqp) {
+               nesqp->cm_node = NULL;
+@@ -1660,7 +1660,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
+ static void drop_packet(struct sk_buff *skb)
+ {
+-      atomic_inc(&cm_accel_dropped_pkts);
++      atomic_inc_unchecked(&cm_accel_dropped_pkts);
+       dev_kfree_skb_any(skb);
+ }
+@@ -1723,7 +1723,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ {
+       int     reset = 0;      /* whether to send reset in case of err.. */
+-      atomic_inc(&cm_resets_recvd);
++      atomic_inc_unchecked(&cm_resets_recvd);
+       nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
+                       " refcnt=%d\n", cm_node, cm_node->state,
+                       atomic_read(&cm_node->ref_count));
+@@ -2369,7 +2369,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+                               rem_ref_cm_node(cm_node->cm_core, cm_node);
+                               return NULL;
+                       }
+-                      atomic_inc(&cm_loopbacks);
++                      atomic_inc_unchecked(&cm_loopbacks);
+                       loopbackremotenode->loopbackpartner = cm_node;
+                       loopbackremotenode->tcp_cntxt.rcv_wscale =
+                               NES_CM_DEFAULT_RCV_WND_SCALE;
+@@ -2644,7 +2644,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
+                               nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
+                       else {
+                               rem_ref_cm_node(cm_core, cm_node);
+-                              atomic_inc(&cm_accel_dropped_pkts);
++                              atomic_inc_unchecked(&cm_accel_dropped_pkts);
+                               dev_kfree_skb_any(skb);
+                       }
+                       break;
+@@ -2965,7 +2965,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
+       if ((cm_id) && (cm_id->event_handler)) {
+               if (issue_disconn) {
+-                      atomic_inc(&cm_disconnects);
++                      atomic_inc_unchecked(&cm_disconnects);
+                       cm_event.event = IW_CM_EVENT_DISCONNECT;
+                       cm_event.status = disconn_status;
+                       cm_event.local_addr = cm_id->m_local_addr;
+@@ -2987,7 +2987,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
+               }
+               if (issue_close) {
+-                      atomic_inc(&cm_closes);
++                      atomic_inc_unchecked(&cm_closes);
+                       nes_disconnect(nesqp, 1);
+                       cm_id->provider_data = nesqp;
+@@ -3124,7 +3124,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+       nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+               nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
+-      atomic_inc(&cm_accepts);
++      atomic_inc_unchecked(&cm_accepts);
+       nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+                       netdev_refcnt_read(nesvnic->netdev));
+@@ -3320,7 +3320,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+       struct nes_cm_core *cm_core;
+       u8 *start_buff;
+-      atomic_inc(&cm_rejects);
++      atomic_inc_unchecked(&cm_rejects);
+       cm_node = (struct nes_cm_node *)cm_id->provider_data;
+       loopback = cm_node->loopbackpartner;
+       cm_core = cm_node->cm_core;
+@@ -3382,7 +3382,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+                 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
+                 ntohs(laddr->sin_port));
+-      atomic_inc(&cm_connects);
++      atomic_inc_unchecked(&cm_connects);
+       nesqp->active_conn = 1;
+       /* cache the cm_id in the qp */
+@@ -3496,7 +3496,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
+                       g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+                       return err;
+               }
+-              atomic_inc(&cm_listens_created);
++              atomic_inc_unchecked(&cm_listens_created);
+       }
+       cm_id->add_ref(cm_id);
+@@ -3603,7 +3603,7 @@ static void cm_event_connected(struct nes_cm_event *event)
+       if (nesqp->destroyed)
+               return;
+-      atomic_inc(&cm_connecteds);
++      atomic_inc_unchecked(&cm_connecteds);
+       nes_debug(NES_DBG_CM, "QP%u attempting to connect to  0x%08X:0x%04X on"
+                 " local port 0x%04X. jiffies = %lu.\n",
+                 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
+@@ -3788,7 +3788,7 @@ static void cm_event_reset(struct nes_cm_event *event)
+       cm_id->add_ref(cm_id);
+       ret = cm_id->event_handler(cm_id, &cm_event);
+-      atomic_inc(&cm_closes);
++      atomic_inc_unchecked(&cm_closes);
+       cm_event.event = IW_CM_EVENT_CLOSE;
+       cm_event.status = 0;
+       cm_event.provider_data = cm_id->provider_data;
+@@ -3828,7 +3828,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
+               return;
+       cm_id = cm_node->cm_id;
+-      atomic_inc(&cm_connect_reqs);
++      atomic_inc_unchecked(&cm_connect_reqs);
+       nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+                 cm_node, cm_id, jiffies);
+@@ -3877,7 +3877,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
+               return;
+       cm_id = cm_node->cm_id;
+-      atomic_inc(&cm_connect_reqs);
++      atomic_inc_unchecked(&cm_connect_reqs);
+       nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+                 cm_node, cm_id, jiffies);
+diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
+index 4166452..fc952c3 100644
+--- a/drivers/infiniband/hw/nes/nes_mgt.c
++++ b/drivers/infiniband/hw/nes/nes_mgt.c
+@@ -40,8 +40,8 @@
+ #include "nes.h"
+ #include "nes_mgt.h"
+-atomic_t pau_qps_created;
+-atomic_t pau_qps_destroyed;
++atomic_unchecked_t pau_qps_created;
++atomic_unchecked_t pau_qps_destroyed;
+ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
+ {
+@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
+ {
+       struct sk_buff *skb;
+       unsigned long flags;
+-      atomic_inc(&pau_qps_destroyed);
++      atomic_inc_unchecked(&pau_qps_destroyed);
+       /* Free packets that have not yet been forwarded */
+       /* Lock is acquired by skb_dequeue when removing the skb */
+@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
+                                       cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
+                               skb_queue_head_init(&nesqp->pau_list);
+                               spin_lock_init(&nesqp->pau_lock);
+-                              atomic_inc(&pau_qps_created);
++                              atomic_inc_unchecked(&pau_qps_created);
+                               nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
+                       }
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index 2b27d13..8f9d46c 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+ /**
+  * nes_netdev_start_xmit
+  */
+-static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct nes_vnic *nesvnic = netdev_priv(netdev);
+       struct nes_device *nesdev = nesvnic->nesdev;
+@@ -1264,36 +1264,36 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+       target_stat_values[++index] = mh_detected;
+       target_stat_values[++index] = mh_pauses_sent;
+       target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+-      target_stat_values[++index] = atomic_read(&cm_connects);
+-      target_stat_values[++index] = atomic_read(&cm_accepts);
+-      target_stat_values[++index] = atomic_read(&cm_disconnects);
+-      target_stat_values[++index] = atomic_read(&cm_connecteds);
+-      target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+-      target_stat_values[++index] = atomic_read(&cm_rejects);
+-      target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+-      target_stat_values[++index] = atomic_read(&qps_created);
+-      target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+-      target_stat_values[++index] = atomic_read(&qps_destroyed);
+-      target_stat_values[++index] = atomic_read(&cm_closes);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
++      target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
++      target_stat_values[++index] = atomic_read_unchecked(&qps_created);
++      target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
+       target_stat_values[++index] = cm_packets_sent;
+       target_stat_values[++index] = cm_packets_bounced;
+       target_stat_values[++index] = cm_packets_created;
+       target_stat_values[++index] = cm_packets_received;
+       target_stat_values[++index] = cm_packets_dropped;
+       target_stat_values[++index] = cm_packets_retrans;
+-      target_stat_values[++index] = atomic_read(&cm_listens_created);
+-      target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
+       target_stat_values[++index] = cm_backlog_drops;
+-      target_stat_values[++index] = atomic_read(&cm_loopbacks);
+-      target_stat_values[++index] = atomic_read(&cm_nodes_created);
+-      target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+-      target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+-      target_stat_values[++index] = atomic_read(&cm_resets_recvd);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
++      target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
+       target_stat_values[++index] = nesadapter->free_4kpbl;
+       target_stat_values[++index] = nesadapter->free_256pbl;
+       target_stat_values[++index] = int_mod_timer_init;
+-      target_stat_values[++index] = atomic_read(&pau_qps_created);
+-      target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
++      target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
++      target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
+ }
+ /**
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index bd69125..10e85d5 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -46,9 +46,9 @@
+ #include <rdma/ib_umem.h>
+-atomic_t mod_qp_timouts;
+-atomic_t qps_created;
+-atomic_t sw_qps_destroyed;
++atomic_unchecked_t mod_qp_timouts;
++atomic_unchecked_t qps_created;
++atomic_unchecked_t sw_qps_destroyed;
+ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
+ static int nes_dereg_mr(struct ib_mr *ib_mr);
+@@ -1040,7 +1040,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+       if (init_attr->create_flags)
+               return ERR_PTR(-EINVAL);
+-      atomic_inc(&qps_created);
++      atomic_inc_unchecked(&qps_created);
+       switch (init_attr->qp_type) {
+               case IB_QPT_RC:
+                       if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+@@ -1376,7 +1376,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
+       struct iw_cm_event cm_event;
+       int ret = 0;
+-      atomic_inc(&sw_qps_destroyed);
++      atomic_inc_unchecked(&sw_qps_destroyed);
+       nesqp->destroyed = 1;
+       /* Blow away the connection if it exists. */
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index ce40340..b211076 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -150,7 +150,7 @@ static struct kparam_string kp_txselect = {
+       .string = txselect_list,
+       .maxlen = MAX_ATTEN_LEN
+ };
+-static int  setup_txselect(const char *, struct kernel_param *);
++static int  setup_txselect(const char *, const struct kernel_param *);
+ module_param_call(txselect, setup_txselect, param_get_string,
+                 &kp_txselect, S_IWUSR | S_IRUGO);
+ MODULE_PARM_DESC(txselect,
+@@ -6177,7 +6177,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
+ }
+ /* handle the txselect parameter changing */
+-static int setup_txselect(const char *str, struct kernel_param *kp)
++static int setup_txselect(const char *str, const struct kernel_param *kp)
+ {
+       struct qib_devdata *dd;
+       unsigned long val;
+diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
+index 6abe1c6..f866a31 100644
+--- a/drivers/infiniband/hw/qib/qib_pcie.c
++++ b/drivers/infiniband/hw/qib/qib_pcie.c
+@@ -622,7 +622,7 @@ static void qib_tune_pcie_caps(struct qib_devdata *dd)
+  * PCI error infrastructure, registered via pci
+  */
+ static pci_ers_result_t
+-qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++qib_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct qib_devdata *dd = pci_get_drvdata(pdev);
+       pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 22ba24f..194cc2b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -219,7 +219,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+       spin_lock_init(&qp->grp_lock);
+       spin_lock_init(&qp->state_lock);
+-      atomic_set(&qp->ssn, 0);
++      atomic_set_unchecked(&qp->ssn, 0);
+       atomic_set(&qp->skb_out, 0);
+ }
+@@ -525,7 +525,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
+       }
+       /* cleanup attributes */
+-      atomic_set(&qp->ssn, 0);
++      atomic_set_unchecked(&qp->ssn, 0);
+       qp->req.opcode = -1;
+       qp->req.need_retry = 0;
+       qp->req.noack_pkts = 0;
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 4552be9..0c68125 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -755,7 +755,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+       wqe->dma.cur_sge        = 0;
+       wqe->dma.sge_offset     = 0;
+       wqe->state              = wqe_state_posted;
+-      wqe->ssn                = atomic_add_return(1, &qp->ssn);
++      wqe->ssn                = atomic_add_return_unchecked(1, &qp->ssn);
+       return 0;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index cac1d52..29bb903 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -262,7 +262,7 @@ struct rxe_qp {
+       struct rxe_comp_info    comp;
+       struct rxe_resp_info    resp;
+-      atomic_t                ssn;
++      atomic_unchecked_t      ssn;
+       atomic_t                skb_out;
+       int                     need_req_skb;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index cc1c1b0..fa712b0 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -1022,7 +1022,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+       spin_unlock_irqrestore(&priv->lock, flags);
+ }
+-static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_neigh *neigh;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index cdc7df4..a2fdfdb 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
+               nla_total_size(2);      /* IFLA_IPOIB_UMCAST */
+ }
+-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
++static struct rtnl_link_ops ipoib_link_ops = {
+       .kind           = "ipoib",
+       .maxtype        = IFLA_IPOIB_MAX,
+       .policy         = ipoib_policy,
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 883bbfe..91c32be 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -80,7 +80,7 @@ module_param(srpt_srq_size, int, 0444);
+ MODULE_PARM_DESC(srpt_srq_size,
+                "Shared receive queue (SRQ) size.");
+-static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
++static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
+ {
+       return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+ }
+@@ -196,8 +196,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
+ /**
+  * srpt_qp_event() - QP event callback function.
+  */
+-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
++static void srpt_qp_event(struct ib_event *event, void *_ch)
+ {
++      struct srpt_rdma_ch *ch = _ch;
+       pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
+                event->event, ch->cm_id, ch->sess_name, ch->state);
+@@ -1628,8 +1629,7 @@ retry:
+       }
+       qp_init->qp_context = (void *)ch;
+-      qp_init->event_handler
+-              = (void(*)(struct ib_event *, void*))srpt_qp_event;
++      qp_init->event_handler = srpt_qp_event;
+       qp_init->send_cq = ch->cq;
+       qp_init->recv_cq = ch->cq;
+       qp_init->srq = sdev->srq;
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index e9ae3d5..96e4940 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -997,7 +997,7 @@ static int evdev_set_mask(struct evdev_client *client,
+       if (!cnt)
+               return 0;
+-      mask = kcalloc(sizeof(unsigned long), BITS_TO_LONGS(cnt), GFP_KERNEL);
++      mask = kcalloc(BITS_TO_LONGS(cnt), sizeof(unsigned long), GFP_KERNEL);
+       if (!mask)
+               return -ENOMEM;
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index 4a2a9e3..b9261a7 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
+  */
+ static void gameport_init_port(struct gameport *gameport)
+ {
+-      static atomic_t gameport_no = ATOMIC_INIT(-1);
++      static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
+       __module_get(THIS_MODULE);
+       mutex_init(&gameport->drv_mutex);
+       device_initialize(&gameport->dev);
+       dev_set_name(&gameport->dev, "gameport%lu",
+-                      (unsigned long)atomic_inc_return(&gameport_no));
++                      (unsigned long)atomic_inc_return_unchecked(&gameport_no));
+       gameport->dev.bus = &gameport_bus;
+       gameport->dev.release = gameport_release_port;
+       if (gameport->parent)
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index d95c34e..2a6da5f 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(input_class);
+  */
+ struct input_dev *input_allocate_device(void)
+ {
+-      static atomic_t input_no = ATOMIC_INIT(-1);
++      static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
+       struct input_dev *dev;
+       dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
+@@ -1795,7 +1795,7 @@ struct input_dev *input_allocate_device(void)
+               INIT_LIST_HEAD(&dev->node);
+               dev_set_name(&dev->dev, "input%lu",
+-                           (unsigned long)atomic_inc_return(&input_no));
++                           (unsigned long)atomic_inc_return_unchecked(&input_no));
+               __module_get(THIS_MODULE);
+       }
+diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
+index 4a95b22..874c182 100644
+--- a/drivers/input/joystick/sidewinder.c
++++ b/drivers/input/joystick/sidewinder.c
+@@ -30,6 +30,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/input.h>
+ #include <linux/gameport.h>
+ #include <linux/jiffies.h>
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index 9c0ea36..1e1a411 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1855,7 +1855,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
+ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
+ {
+-      static atomic_t device_no = ATOMIC_INIT(-1);
++      static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
+       const struct ims_pcu_device_info *info;
+       int error;
+@@ -1886,7 +1886,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
+       }
+       /* Device appears to be operable, complete initialization */
+-      pcu->device_no = atomic_inc_return(&device_no);
++      pcu->device_no = atomic_inc_return_unchecked(&device_no);
+       /*
+        * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
+diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
+index e0ca6cd..b5a2681 100644
+--- a/drivers/input/mouse/psmouse.h
++++ b/drivers/input/mouse/psmouse.h
+@@ -126,7 +126,7 @@ struct psmouse_attribute {
+       ssize_t (*set)(struct psmouse *psmouse, void *data,
+                       const char *buf, size_t count);
+       bool protect;
+-};
++} __do_const;
+ #define to_psmouse_attr(a)    container_of((a), struct psmouse_attribute, dattr)
+ ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
+diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
+index b604564..3f14ae4 100644
+--- a/drivers/input/mousedev.c
++++ b/drivers/input/mousedev.c
+@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
+       spin_unlock_irq(&client->packet_lock);
+-      if (copy_to_user(buffer, data, count))
++      if (count > sizeof(data) || copy_to_user(buffer, data, count))
+               return -EFAULT;
+       return count;
+diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
+index 1ca7f55..2562607 100644
+--- a/drivers/input/serio/serio.c
++++ b/drivers/input/serio/serio.c
+@@ -512,7 +512,7 @@ static void serio_release_port(struct device *dev)
+  */
+ static void serio_init_port(struct serio *serio)
+ {
+-      static atomic_t serio_no = ATOMIC_INIT(-1);
++      static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
+       __module_get(THIS_MODULE);
+@@ -523,7 +523,7 @@ static void serio_init_port(struct serio *serio)
+       mutex_init(&serio->drv_mutex);
+       device_initialize(&serio->dev);
+       dev_set_name(&serio->dev, "serio%lu",
+-                   (unsigned long)atomic_inc_return(&serio_no));
++                   (unsigned long)atomic_inc_return_unchecked(&serio_no));
+       serio->dev.bus = &serio_bus;
+       serio->dev.release = serio_release_port;
+       serio->dev.groups = serio_device_attr_groups;
+diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
+index 71ef5d6..93380a9 100644
+--- a/drivers/input/serio/serio_raw.c
++++ b/drivers/input/serio/serio_raw.c
+@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
+ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
+ {
+-      static atomic_t serio_raw_no = ATOMIC_INIT(-1);
++      static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
+       struct serio_raw *serio_raw;
+       int err;
+@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
+       }
+       snprintf(serio_raw->name, sizeof(serio_raw->name),
+-               "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
++               "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
+       kref_init(&serio_raw->kref);
+       INIT_LIST_HEAD(&serio_raw->client_list);
+       init_waitqueue_head(&serio_raw->wait);
+diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
+index 92e2243..8fd9092 100644
+--- a/drivers/input/touchscreen/htcpen.c
++++ b/drivers/input/touchscreen/htcpen.c
+@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
+       }
+ };
+-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
++static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
+       {
+               .ident = "Shift",
+               .matches = {
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 96de97a..04eaea7 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -791,11 +791,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
+ static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+ {
++      phys_addr_t physaddr;
+       WARN_ON(address & 0x7ULL);
+       memset(cmd, 0, sizeof(*cmd));
+-      cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
+-      cmd->data[1] = upper_32_bits(__pa(address));
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      if (object_starts_on_stack((void *)address)) {
++              void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
++              physaddr = __pa((u64)adjbuf);
++      } else
++#endif
++      physaddr = __pa(address);
++
++      cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
++      cmd->data[1] = upper_32_bits(physaddr);
+       cmd->data[2] = 1;
+       CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+ }
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 641e887..df73c18 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -626,7 +626,7 @@ struct arm_smmu_domain {
+       struct arm_smmu_device          *smmu;
+       struct mutex                    init_mutex; /* Protects smmu pointer */
+-      struct io_pgtable_ops           *pgtbl_ops;
++      struct io_pgtable               *pgtbl;
+       spinlock_t                      pgtbl_lock;
+       enum arm_smmu_domain_stage      stage;
+@@ -1448,7 +1448,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       iommu_put_dma_cookie(domain);
+-      free_io_pgtable_ops(smmu_domain->pgtbl_ops);
++      free_io_pgtable(smmu_domain->pgtbl);
+       /* Free the CD and ASID, if we allocated them */
+       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+@@ -1526,7 +1526,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+       unsigned long ias, oas;
+       enum io_pgtable_fmt fmt;
+       struct io_pgtable_cfg pgtbl_cfg;
+-      struct io_pgtable_ops *pgtbl_ops;
++      struct io_pgtable *iop;
+       int (*finalise_stage_fn)(struct arm_smmu_domain *,
+                                struct io_pgtable_cfg *);
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+@@ -1564,16 +1564,16 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+               .iommu_dev      = smmu->dev,
+       };
+-      pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+-      if (!pgtbl_ops)
++      iop = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
++      if (!iop)
+               return -ENOMEM;
+       domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+-      smmu_domain->pgtbl_ops = pgtbl_ops;
++      smmu_domain->pgtbl = iop;
+       ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+       if (ret < 0)
+-              free_io_pgtable_ops(pgtbl_ops);
++              free_io_pgtable(iop);
+       return ret;
+ }
+@@ -1711,13 +1711,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+       int ret;
+       unsigned long flags;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+-      struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+-      if (!ops)
++      if (!iop)
+               return -ENODEV;
+       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+-      ret = ops->map(ops, iova, paddr, size, prot);
++      ret = iop->ops->map(iop, iova, paddr, size, prot);
+       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+       return ret;
+ }
+@@ -1728,13 +1728,13 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+       size_t ret;
+       unsigned long flags;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+-      struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+-      if (!ops)
++      if (!iop)
+               return 0;
+       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+-      ret = ops->unmap(ops, iova, size);
++      ret = iop->ops->unmap(iop, iova, size);
+       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+       return ret;
+ }
+@@ -1745,13 +1745,13 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
+       phys_addr_t ret;
+       unsigned long flags;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+-      struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+-      if (!ops)
++      if (!iop)
+               return 0;
+       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+-      ret = ops->iova_to_phys(ops, iova);
++      ret = iop->ops->iova_to_phys(iop, iova);
+       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+       return ret;
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 2db74eb..4bbcf9d 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -389,7 +389,7 @@ enum arm_smmu_domain_stage {
+ struct arm_smmu_domain {
+       struct arm_smmu_device          *smmu;
+-      struct io_pgtable_ops           *pgtbl_ops;
++      struct io_pgtable               *pgtbl;
+       spinlock_t                      pgtbl_lock;
+       struct arm_smmu_cfg             cfg;
+       enum arm_smmu_domain_stage      stage;
+@@ -831,7 +831,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ {
+       int irq, start, ret = 0;
+       unsigned long ias, oas;
+-      struct io_pgtable_ops *pgtbl_ops;
++      struct io_pgtable *pgtbl;
+       struct io_pgtable_cfg pgtbl_cfg;
+       enum io_pgtable_fmt fmt;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+@@ -950,8 +950,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+       };
+       smmu_domain->smmu = smmu;
+-      pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+-      if (!pgtbl_ops) {
++      pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
++      if (!pgtbl) {
+               ret = -ENOMEM;
+               goto out_clear_smmu;
+       }
+@@ -978,7 +978,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+       mutex_unlock(&smmu_domain->init_mutex);
+       /* Publish page table ops for map/unmap */
+-      smmu_domain->pgtbl_ops = pgtbl_ops;
++      smmu_domain->pgtbl = pgtbl;
+       return 0;
+ out_clear_smmu:
+@@ -1011,7 +1011,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+               devm_free_irq(smmu->dev, irq, domain);
+       }
+-      free_io_pgtable_ops(smmu_domain->pgtbl_ops);
++      free_io_pgtable(smmu_domain->pgtbl);
+       __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+ }
+@@ -1248,13 +1248,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+       int ret;
+       unsigned long flags;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+-      struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+-      if (!ops)
++      if (!iop)
+               return -ENODEV;
+       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+-      ret = ops->map(ops, iova, paddr, size, prot);
++      ret = iop->ops->map(iop, iova, paddr, size, prot);
+       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+       return ret;
+ }
+@@ -1265,13 +1265,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+       size_t ret;
+       unsigned long flags;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+-      struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+-      if (!ops)
++      if (!iop)
+               return 0;
+       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+-      ret = ops->unmap(ops, iova, size);
++      ret = iop->ops->unmap(iop, iova, size);
+       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+       return ret;
+ }
+@@ -1282,7 +1282,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+-      struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+       struct device *dev = smmu->dev;
+       void __iomem *cb_base;
+       u32 tmp;
+@@ -1303,7 +1303,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+               dev_err(dev,
+                       "iova to phys timed out on %pad. Falling back to software table walk.\n",
+                       &iova);
+-              return ops->iova_to_phys(ops, iova);
++              return iop->ops->iova_to_phys(iop, iova);
+       }
+       phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
+@@ -1322,9 +1322,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+       phys_addr_t ret;
+       unsigned long flags;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+-      struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++      struct io_pgtable *iop = smmu_domain->pgtbl;
+-      if (!ops)
++      if (!iop)
+               return 0;
+       spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+@@ -1332,7 +1332,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+                       smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+               ret = arm_smmu_iova_to_phys_hard(domain, iova);
+       } else {
+-              ret = ops->iova_to_phys(ops, iova);
++              ret = iop->ops->iova_to_phys(iop, iova);
+       }
+       spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+@@ -1809,10 +1809,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+       if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
+               smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
++      pax_open_kernel();
+       if (arm_smmu_ops.pgsize_bitmap == -1UL)
+-              arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
++              const_cast(arm_smmu_ops.pgsize_bitmap) = smmu->pgsize_bitmap;
+       else
+-              arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
++              const_cast(arm_smmu_ops.pgsize_bitmap) |= smmu->pgsize_bitmap;
++      pax_close_kernel();
+       dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
+                  smmu->pgsize_bitmap);
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index def8ca1..039660d 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -49,9 +49,6 @@
+ #define io_pgtable_to_data(x)                                         \
+       container_of((x), struct arm_v7s_io_pgtable, iop)
+-#define io_pgtable_ops_to_data(x)                                     \
+-      io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+-
+ /*
+  * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
+  * and 12 bits in a page. With some carefully-chosen coefficients we can
+@@ -426,11 +423,10 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
+       return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ }
+-static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
++static int arm_v7s_map(struct io_pgtable *iop, unsigned long iova,
+                       phys_addr_t paddr, size_t size, int prot)
+ {
+-      struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+-      struct io_pgtable *iop = &data->iop;
++      struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
+       int ret;
+       /* If no access, then nothing to do */
+@@ -593,10 +589,10 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+       return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+ }
+-static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
++static int arm_v7s_unmap(struct io_pgtable *iop, unsigned long iova,
+                        size_t size)
+ {
+-      struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
++      struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
+       size_t unmapped;
+       unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+@@ -606,10 +602,10 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+       return unmapped;
+ }
+-static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
++static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable *iop,
+                                       unsigned long iova)
+ {
+-      struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
++      struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
+       arm_v7s_iopte *ptep = data->pgd, pte;
+       int lvl = 0;
+       u32 mask;
+@@ -628,6 +624,12 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
+       return (pte & mask) | (iova & ~mask);
+ }
++static struct io_pgtable_ops arm_v7s_io_pgtable_ops = {
++      .map            = arm_v7s_map,
++      .unmap          = arm_v7s_unmap,
++      .iova_to_phys   = arm_v7s_iova_to_phys,
++};
++
+ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+                                               void *cookie)
+ {
+@@ -658,11 +660,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+       if (!data->l2_tables)
+               goto out_free_data;
+-      data->iop.ops = (struct io_pgtable_ops) {
+-              .map            = arm_v7s_map,
+-              .unmap          = arm_v7s_unmap,
+-              .iova_to_phys   = arm_v7s_iova_to_phys,
+-      };
++      data->iop.ops = &arm_v7s_io_pgtable_ops;
+       /* We have to do this early for __arm_v7s_alloc_table to work... */
+       data->iop.cfg = *cfg;
+@@ -751,7 +749,7 @@ static struct iommu_gather_ops dummy_tlb_ops = {
+ static int __init arm_v7s_do_selftests(void)
+ {
+-      struct io_pgtable_ops *ops;
++      struct io_pgtable *pgtbl;
+       struct io_pgtable_cfg cfg = {
+               .tlb = &dummy_tlb_ops,
+               .oas = 32,
+@@ -766,8 +764,8 @@ static int __init arm_v7s_do_selftests(void)
+       cfg_cookie = &cfg;
+-      ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
+-      if (!ops) {
++      pgtbl = alloc_io_pgtable(ARM_V7S, &cfg, &cfg);
++      if (!pgtbl) {
+               pr_err("selftest: failed to allocate io pgtable ops\n");
+               return -EINVAL;
+       }
+@@ -776,13 +774,13 @@ static int __init arm_v7s_do_selftests(void)
+        * Initial sanity checks.
+        * Empty page tables shouldn't provide any translations.
+        */
+-      if (ops->iova_to_phys(ops, 42))
++      if (pgtbl->ops->iova_to_phys(pgtbl, 42))
+               return __FAIL(ops);
+-      if (ops->iova_to_phys(ops, SZ_1G + 42))
++      if (pgtbl->ops->iova_to_phys(pgtbl, SZ_1G + 42))
+               return __FAIL(ops);
+-      if (ops->iova_to_phys(ops, SZ_2G + 42))
++      if (pgtbl->ops->iova_to_phys(pgtbl, SZ_2G + 42))
+               return __FAIL(ops);
+       /*
+@@ -792,18 +790,18 @@ static int __init arm_v7s_do_selftests(void)
+       i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
+       while (i != BITS_PER_LONG) {
+               size = 1UL << i;
+-              if (ops->map(ops, iova, iova, size, IOMMU_READ |
++              if (pgtbl->ops->map(pgtbl, iova, iova, size, IOMMU_READ |
+                                                   IOMMU_WRITE |
+                                                   IOMMU_NOEXEC |
+                                                   IOMMU_CACHE))
+                       return __FAIL(ops);
+               /* Overlapping mappings */
+-              if (!ops->map(ops, iova, iova + size, size,
++              if (!pgtbl->ops->map(pgtbl, iova, iova + size, size,
+                             IOMMU_READ | IOMMU_NOEXEC))
+                       return __FAIL(ops);
+-              if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
++              if (pgtbl->ops->iova_to_phys(pgtbl, iova + 42) != (iova + 42))
+                       return __FAIL(ops);
+               iova += SZ_16M;
+@@ -817,14 +815,14 @@ static int __init arm_v7s_do_selftests(void)
+       size = 1UL << __ffs(cfg.pgsize_bitmap);
+       while (i < loopnr) {
+               iova_start = i * SZ_16M;
+-              if (ops->unmap(ops, iova_start + size, size) != size)
++              if (pgtbl->ops->unmap(pgtbl, iova_start + size, size) != size)
+                       return __FAIL(ops);
+               /* Remap of partial unmap */
+-              if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
++              if (pgtbl->ops->map(pgtbl, iova_start + size, size, size, IOMMU_READ))
+                       return __FAIL(ops);
+-              if (ops->iova_to_phys(ops, iova_start + size + 42)
++              if (pgtbl->ops->iova_to_phys(pgtbl, iova_start + size + 42)
+                   != (size + 42))
+                       return __FAIL(ops);
+               i++;
+@@ -836,17 +834,17 @@ static int __init arm_v7s_do_selftests(void)
+       while (i != BITS_PER_LONG) {
+               size = 1UL << i;
+-              if (ops->unmap(ops, iova, size) != size)
++              if (pgtbl->ops->unmap(pgtbl, iova, size) != size)
+                       return __FAIL(ops);
+-              if (ops->iova_to_phys(ops, iova + 42))
++              if (pgtbl->ops->iova_to_phys(pgtbl, iova + 42))
+                       return __FAIL(ops);
+               /* Remap full block */
+-              if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
++              if (pgtbl->ops->map(pgtbl, iova, iova, size, IOMMU_WRITE))
+                       return __FAIL(ops);
+-              if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
++              if (pgtbl->ops->iova_to_phys(pgtbl, iova + 42) != (iova + 42))
+                       return __FAIL(ops);
+               iova += SZ_16M;
+@@ -854,7 +852,7 @@ static int __init arm_v7s_do_selftests(void)
+               i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
+       }
+-      free_io_pgtable_ops(ops);
++      free_io_pgtable(pgtbl);
+       selftest_running = false;
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index f5c90e1..90a737c 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -39,9 +39,6 @@
+ #define io_pgtable_to_data(x)                                         \
+       container_of((x), struct arm_lpae_io_pgtable, iop)
+-#define io_pgtable_ops_to_data(x)                                     \
+-      io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+-
+ /*
+  * For consistency with the architecture, we always consider
+  * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
+@@ -381,10 +378,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
+       return pte;
+ }
+-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
++static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
+                       phys_addr_t paddr, size_t size, int iommu_prot)
+ {
+-      struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++      struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+       arm_lpae_iopte *ptep = data->pgd;
+       int ret, lvl = ARM_LPAE_START_LVL(data);
+       arm_lpae_iopte prot;
+@@ -530,11 +527,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+       return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+ }
+-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
++static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
+                         size_t size)
+ {
+       size_t unmapped;
+-      struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++      struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+       arm_lpae_iopte *ptep = data->pgd;
+       int lvl = ARM_LPAE_START_LVL(data);
+@@ -545,10 +542,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+       return unmapped;
+ }
+-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
++static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
+                                        unsigned long iova)
+ {
+-      struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++      struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+       arm_lpae_iopte pte, *ptep = data->pgd;
+       int lvl = ARM_LPAE_START_LVL(data);
+@@ -615,6 +612,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
+       }
+ }
++static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
++      .map            = arm_lpae_map,
++      .unmap          = arm_lpae_unmap,
++      .iova_to_phys   = arm_lpae_iova_to_phys,
++};
++
+ static struct arm_lpae_io_pgtable *
+ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+ {
+@@ -651,11 +654,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+       pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+       data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+-      data->iop.ops = (struct io_pgtable_ops) {
+-              .map            = arm_lpae_map,
+-              .unmap          = arm_lpae_unmap,
+-              .iova_to_phys   = arm_lpae_iova_to_phys,
+-      };
++      data->iop.ops = &arm_lpae_io_pgtable_ops;
+       return data;
+ }
+@@ -916,15 +915,15 @@ static void dummy_tlb_sync(void *cookie)
+       WARN_ON(cookie != cfg_cookie);
+ }
+-static struct iommu_gather_ops dummy_tlb_ops __initdata = {
++static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
+       .tlb_flush_all  = dummy_tlb_flush_all,
+       .tlb_add_flush  = dummy_tlb_add_flush,
+       .tlb_sync       = dummy_tlb_sync,
+ };
+-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
++static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
+ {
+-      struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++      struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
+       pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+@@ -934,9 +933,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+               data->bits_per_level, data->pgd);
+ }
+-#define __FAIL(ops, i)        ({                                              \
++#define __FAIL(iop, i)        ({                                              \
+               WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
+-              arm_lpae_dump_ops(ops);                                 \
++              arm_lpae_dump_ops(iop);                                 \
+               selftest_running = false;                               \
+               -EFAULT;                                                \
+ })
+@@ -951,30 +950,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+       int i, j;
+       unsigned long iova;
+       size_t size;
+-      struct io_pgtable_ops *ops;
++      struct io_pgtable *iop;
++      const struct io_pgtable_ops *ops;
+       selftest_running = true;
+       for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+               cfg_cookie = cfg;
+-              ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+-              if (!ops) {
++              iop = alloc_io_pgtable(fmts[i], cfg, cfg);
++              if (!iop) {
+                       pr_err("selftest: failed to allocate io pgtable ops\n");
+                       return -ENOMEM;
+               }
++              ops = iop->ops;
+               /*
+                * Initial sanity checks.
+                * Empty page tables shouldn't provide any translations.
+                */
+-              if (ops->iova_to_phys(ops, 42))
+-                      return __FAIL(ops, i);
++              if (ops->iova_to_phys(iop, 42))
++                      return __FAIL(iop, i);
+-              if (ops->iova_to_phys(ops, SZ_1G + 42))
+-                      return __FAIL(ops, i);
++              if (ops->iova_to_phys(iop, SZ_1G + 42))
++                      return __FAIL(iop, i);
+-              if (ops->iova_to_phys(ops, SZ_2G + 42))
+-                      return __FAIL(ops, i);
++              if (ops->iova_to_phys(iop, SZ_2G + 42))
++                      return __FAIL(iop, i);
+               /*
+                * Distinct mappings of different granule sizes.
+@@ -984,19 +985,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+               while (j != BITS_PER_LONG) {
+                       size = 1UL << j;
+-                      if (ops->map(ops, iova, iova, size, IOMMU_READ |
++                      if (ops->map(iop, iova, iova, size, IOMMU_READ |
+                                                           IOMMU_WRITE |
+                                                           IOMMU_NOEXEC |
+                                                           IOMMU_CACHE))
+-                              return __FAIL(ops, i);
++                              return __FAIL(iop, i);
+                       /* Overlapping mappings */
+-                      if (!ops->map(ops, iova, iova + size, size,
++                      if (!ops->map(iop, iova, iova + size, size,
+                                     IOMMU_READ | IOMMU_NOEXEC))
+-                              return __FAIL(ops, i);
++                              return __FAIL(iop, i);
+-                      if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+-                              return __FAIL(ops, i);
++                      if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
++                              return __FAIL(iop, i);
+                       iova += SZ_1G;
+                       j++;
+@@ -1005,15 +1006,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+               /* Partial unmap */
+               size = 1UL << __ffs(cfg->pgsize_bitmap);
+-              if (ops->unmap(ops, SZ_1G + size, size) != size)
+-                      return __FAIL(ops, i);
++              if (ops->unmap(iop, SZ_1G + size, size) != size)
++                      return __FAIL(iop, i);
+               /* Remap of partial unmap */
+-              if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+-                      return __FAIL(ops, i);
++              if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
++                      return __FAIL(iop, i);
+-              if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+-                      return __FAIL(ops, i);
++              if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
++                      return __FAIL(iop, i);
+               /* Full unmap */
+               iova = 0;
+@@ -1021,25 +1022,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+               while (j != BITS_PER_LONG) {
+                       size = 1UL << j;
+-                      if (ops->unmap(ops, iova, size) != size)
+-                              return __FAIL(ops, i);
++                      if (ops->unmap(iop, iova, size) != size)
++                              return __FAIL(iop, i);
+-                      if (ops->iova_to_phys(ops, iova + 42))
+-                              return __FAIL(ops, i);
++                      if (ops->iova_to_phys(iop, iova + 42))
++                              return __FAIL(iop, i);
+                       /* Remap full block */
+-                      if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+-                              return __FAIL(ops, i);
++                      if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
++                              return __FAIL(iop, i);
+-                      if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+-                              return __FAIL(ops, i);
++                      if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
++                              return __FAIL(iop, i);
+                       iova += SZ_1G;
+                       j++;
+                       j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+               }
+-              free_io_pgtable_ops(ops);
++              free_io_pgtable(iop);
+       }
+       selftest_running = false;
+diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
+index 127558d..bc60b81 100644
+--- a/drivers/iommu/io-pgtable.c
++++ b/drivers/iommu/io-pgtable.c
+@@ -37,7 +37,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
+ #endif
+ };
+-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
++struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
+                                           struct io_pgtable_cfg *cfg,
+                                           void *cookie)
+ {
+@@ -59,21 +59,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+       iop->cookie     = cookie;
+       iop->cfg        = *cfg;
+-      return &iop->ops;
++      return iop;
+ }
+ /*
+  * It is the IOMMU driver's responsibility to ensure that the page table
+  * is no longer accessible to the walker by this point.
+  */
+-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
++void free_io_pgtable(struct io_pgtable *iop)
+ {
+-      struct io_pgtable *iop;
+-
+-      if (!ops)
++      if (!iop)
+               return;
+-      iop = container_of(ops, struct io_pgtable, ops);
+       io_pgtable_tlb_flush_all(iop);
+       io_pgtable_init_table[iop->fmt]->free(iop);
+ }
+diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
+index 969d82c..1ba9b6e 100644
+--- a/drivers/iommu/io-pgtable.h
++++ b/drivers/iommu/io-pgtable.h
+@@ -109,17 +109,18 @@ struct io_pgtable_cfg {
+  * These functions map directly onto the iommu_ops member functions with
+  * the same names.
+  */
++struct io_pgtable;
+ struct io_pgtable_ops {
+-      int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
++      int (*map)(struct io_pgtable *iop, unsigned long iova,
+                  phys_addr_t paddr, size_t size, int prot);
+-      int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
++      int (*unmap)(struct io_pgtable *iop, unsigned long iova,
+                    size_t size);
+-      phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
++      phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
+                                   unsigned long iova);
+ };
+ /**
+- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
++ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
+  *
+  * @fmt:    The page table format.
+  * @cfg:    The page table configuration. This will be modified to represent
+@@ -128,9 +129,9 @@ struct io_pgtable_ops {
+  * @cookie: An opaque token provided by the IOMMU driver and passed back to
+  *          the callback routines in cfg->tlb.
+  */
+-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+-                                          struct io_pgtable_cfg *cfg,
+-                                          void *cookie);
++struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
++                                  struct io_pgtable_cfg *cfg,
++                                  void *cookie);
+ /**
+  * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+@@ -139,7 +140,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+  *
+  * @ops: The ops returned from alloc_io_pgtable_ops.
+  */
+-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
++void free_io_pgtable(struct io_pgtable *iop);
+ /*
+@@ -161,11 +162,9 @@ struct io_pgtable {
+       void                    *cookie;
+       bool                    tlb_sync_pending;
+       struct io_pgtable_cfg   cfg;
+-      struct io_pgtable_ops   ops;
++      const struct io_pgtable_ops     *ops;
+ };
+-#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+-
+ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+ {
+       iop->cfg.tlb->tlb_flush_all(iop->cookie);
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index b06d935..59bad56 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -944,7 +944,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
+ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
+ {
+       int err;
+-      struct notifier_block *nb;
++      notifier_block_no_const *nb;
+       struct iommu_callback_data cb = {
+               .ops = ops,
+       };
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
+index 2fdbac6..7095311 100644
+--- a/drivers/iommu/ipmmu-vmsa.c
++++ b/drivers/iommu/ipmmu-vmsa.c
+@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
+       struct iommu_domain io_domain;
+       struct io_pgtable_cfg cfg;
+-      struct io_pgtable_ops *iop;
++      struct io_pgtable *iop;
+       unsigned int context_id;
+       spinlock_t lock;                        /* Protects mappings */
+@@ -319,8 +319,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+        */
+       domain->cfg.iommu_dev = domain->mmu->dev;
+-      domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+-                                         domain);
++      domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
+       if (!domain->iop)
+               return -EINVAL;
+@@ -478,7 +477,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
+        * been detached.
+        */
+       ipmmu_domain_destroy_context(domain);
+-      free_io_pgtable_ops(domain->iop);
++      free_io_pgtable(domain->iop);
+       kfree(domain);
+ }
+@@ -547,7 +546,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
+       if (!domain)
+               return -ENODEV;
+-      return domain->iop->map(domain->iop, iova, paddr, size, prot);
++      return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
+ }
+ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
+@@ -555,7 +554,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
+ {
+       struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
+-      return domain->iop->unmap(domain->iop, iova, size);
++      return domain->iop->ops->unmap(domain->iop, iova, size);
+ }
+ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
+@@ -565,7 +564,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
+       /* TODO: Is locking needed ? */
+-      return domain->iop->iova_to_phys(domain->iop, iova);
++      return domain->iop->ops->iova_to_phys(domain->iop, iova);
+ }
+ static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 49721b4..62874d3 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -153,7 +153,7 @@ int __init irq_remap_enable_fault_handling(void)
+ void panic_if_irq_remap(const char *msg)
+ {
+       if (irq_remapping_enabled)
+-              panic(msg);
++              panic("%s", msg);
+ }
+ void ir_ack_apic_edge(struct irq_data *data)
+diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
+index b09692b..aa64d59 100644
+--- a/drivers/iommu/msm_iommu.c
++++ b/drivers/iommu/msm_iommu.c
+@@ -53,7 +53,7 @@ struct msm_priv {
+       struct list_head list_attached;
+       struct iommu_domain domain;
+       struct io_pgtable_cfg   cfg;
+-      struct io_pgtable_ops   *iop;
++      struct io_pgtable       *iop;
+       struct device           *dev;
+       spinlock_t              pgtlock; /* pagetable lock */
+ };
+@@ -360,13 +360,15 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
+               .iommu_dev = priv->dev,
+       };
+-      priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
++      priv->iop = alloc_io_pgtable(ARM_V7S, &priv->cfg, priv);
+       if (!priv->iop) {
+               dev_err(priv->dev, "Failed to allocate pgtable\n");
+               return -EINVAL;
+       }
+-      msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
++      pax_open_kernel();
++      const_cast(msm_iommu_ops.pgsize_bitmap) = priv->cfg.pgsize_bitmap;
++      pax_close_kernel();
+       return 0;
+ }
+@@ -429,7 +431,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
+       struct msm_iommu_ctx_dev *master;
+       int ret;
+-      free_io_pgtable_ops(priv->iop);
++      free_io_pgtable(priv->iop);
+       spin_lock_irqsave(&msm_iommu_lock, flags);
+       list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+@@ -455,7 +457,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
+       int ret;
+       spin_lock_irqsave(&priv->pgtlock, flags);
+-      ret = priv->iop->map(priv->iop, iova, pa, len, prot);
++      ret = priv->iop->ops->map(priv->iop, iova, pa, len, prot);
+       spin_unlock_irqrestore(&priv->pgtlock, flags);
+       return ret;
+@@ -468,7 +470,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+       unsigned long flags;
+       spin_lock_irqsave(&priv->pgtlock, flags);
+-      len = priv->iop->unmap(priv->iop, iova, len);
++      len = priv->iop->ops->unmap(priv->iop, iova, len);
+       spin_unlock_irqrestore(&priv->pgtlock, flags);
+       return len;
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index b12c12d..27bf745 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -97,7 +97,7 @@ struct mtk_iommu_domain {
+       spinlock_t                      pgtlock; /* lock for page table */
+       struct io_pgtable_cfg           cfg;
+-      struct io_pgtable_ops           *iop;
++      struct io_pgtable               *iop;
+       struct iommu_domain             domain;
+ };
+@@ -235,7 +235,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+       if (data->enable_4GB)
+               dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
+-      dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
++      dom->iop = alloc_io_pgtable(ARM_V7S, &dom->cfg, data);
+       if (!dom->iop) {
+               dev_err(data->dev, "Failed to alloc io pgtable\n");
+               return -EINVAL;
+@@ -328,7 +328,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+       int ret;
+       spin_lock_irqsave(&dom->pgtlock, flags);
+-      ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
++      ret = dom->iop->ops->map(dom->iop, iova, paddr, size, prot);
+       spin_unlock_irqrestore(&dom->pgtlock, flags);
+       return ret;
+@@ -342,7 +342,7 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
+       size_t unmapsz;
+       spin_lock_irqsave(&dom->pgtlock, flags);
+-      unmapsz = dom->iop->unmap(dom->iop, iova, size);
++      unmapsz = dom->iop->ops->unmap(dom->iop, iova, size);
+       spin_unlock_irqrestore(&dom->pgtlock, flags);
+       return unmapsz;
+@@ -356,7 +356,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+       phys_addr_t pa;
+       spin_lock_irqsave(&dom->pgtlock, flags);
+-      pa = dom->iop->iova_to_phys(dom->iop, iova);
++      pa = dom->iop->ops->iova_to_phys(dom->iop, iova);
+       spin_unlock_irqrestore(&dom->pgtlock, flags);
+       return pa;
+@@ -615,7 +615,7 @@ static int mtk_iommu_remove(struct platform_device *pdev)
+       if (iommu_present(&platform_bus_type))
+               bus_set_iommu(&platform_bus_type, NULL);
+-      free_io_pgtable_ops(data->m4u_dom->iop);
++      free_io_pgtable(data->m4u_dom->iop);
+       clk_disable_unprepare(data->bclk);
+       devm_free_irq(&pdev->dev, data->irq, data);
+       component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 390fac5..74fed85 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -392,7 +392,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
+       chained_irq_exit(chip, desc);
+ }
+-static struct irq_chip gic_chip = {
++static irq_chip_no_const gic_chip __read_only = {
+       .irq_mask               = gic_mask_irq,
+       .irq_unmask             = gic_unmask_irq,
+       .irq_eoi                = gic_eoi_irq,
+diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
+index 6b304eb..6e3a1413 100644
+--- a/drivers/irqchip/irq-i8259.c
++++ b/drivers/irqchip/irq-i8259.c
+@@ -204,7 +204,7 @@ spurious_8259A_irq:
+                       printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+                       spurious_irq_mask |= irqmask;
+               }
+-              atomic_inc(&irq_err_count);
++              atomic_inc_unchecked(&irq_err_count);
+               /*
+                * Theoretically we do not have to handle this IRQ,
+                * but in Linux this does not cause problems and is
+diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
+index 013fc96..36a9a97 100644
+--- a/drivers/irqchip/irq-mmp.c
++++ b/drivers/irqchip/irq-mmp.c
+@@ -122,7 +122,7 @@ static void icu_unmask_irq(struct irq_data *d)
+       }
+ }
+-struct irq_chip icu_irq_chip = {
++irq_chip_no_const icu_irq_chip __read_only = {
+       .name           = "icu_irq",
+       .irq_mask       = icu_mask_irq,
+       .irq_mask_ack   = icu_mask_ack_irq,
+diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
+index 713177d..3849ddd 100644
+--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
++++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
+@@ -396,7 +396,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
+       struct intc_irqpin_iomem *i;
+       struct resource *io[INTC_IRQPIN_REG_NR];
+       struct resource *irq;
+-      struct irq_chip *irq_chip;
++      irq_chip_no_const *irq_chip;
+       void (*enable_fn)(struct irq_data *d);
+       void (*disable_fn)(struct irq_data *d);
+       const char *name = dev_name(dev);
+diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
+index 2325fb3..fca7529 100644
+--- a/drivers/irqchip/irq-ts4800.c
++++ b/drivers/irqchip/irq-ts4800.c
+@@ -93,7 +93,7 @@ static int ts4800_ic_probe(struct platform_device *pdev)
+ {
+       struct device_node *node = pdev->dev.of_node;
+       struct ts4800_irq_data *data;
+-      struct irq_chip *irq_chip;
++      irq_chip_no_const *irq_chip;
+       struct resource *res;
+       int parent_irq;
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index 6a2df32..dc962f1 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -81,8 +81,8 @@ struct capiminor {
+       struct capi20_appl      *ap;
+       u32                     ncci;
+-      atomic_t                datahandle;
+-      atomic_t                msgid;
++      atomic_unchecked_t      datahandle;
++      atomic_unchecked_t      msgid;
+       struct tty_port port;
+       int                ttyinstop;
+@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
+               capimsg_setu16(s, 2, mp->ap->applid);
+               capimsg_setu8 (s, 4, CAPI_DATA_B3);
+               capimsg_setu8 (s, 5, CAPI_RESP);
+-              capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
++              capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
+               capimsg_setu32(s, 8, mp->ncci);
+               capimsg_setu16(s, 12, datahandle);
+       }
+@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
+               mp->outbytes -= len;
+               spin_unlock_bh(&mp->outlock);
+-              datahandle = atomic_inc_return(&mp->datahandle);
++              datahandle = atomic_inc_return_unchecked(&mp->datahandle);
+               skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+               memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+               capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+               capimsg_setu16(skb->data, 2, mp->ap->applid);
+               capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
+               capimsg_setu8 (skb->data, 5, CAPI_REQ);
+-              capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
++              capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
+               capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
+               capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
+               capimsg_setu16(skb->data, 16, len);     /* Data length */
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index aecec6d..11e13c5 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
+ static const struct gigaset_ops gigops = {
+-      gigaset_write_cmd,
+-      gigaset_write_room,
+-      gigaset_chars_in_buffer,
+-      gigaset_brkchars,
+-      gigaset_init_bchannel,
+-      gigaset_close_bchannel,
+-      gigaset_initbcshw,
+-      gigaset_freebcshw,
+-      gigaset_reinitbcshw,
+-      gigaset_initcshw,
+-      gigaset_freecshw,
+-      gigaset_set_modem_ctrl,
+-      gigaset_baud_rate,
+-      gigaset_set_line_ctrl,
+-      gigaset_isoc_send_skb,
+-      gigaset_isoc_input,
++      .write_cmd = gigaset_write_cmd,
++      .write_room = gigaset_write_room,
++      .chars_in_buffer = gigaset_chars_in_buffer,
++      .brkchars = gigaset_brkchars,
++      .init_bchannel = gigaset_init_bchannel,
++      .close_bchannel = gigaset_close_bchannel,
++      .initbcshw = gigaset_initbcshw,
++      .freebcshw = gigaset_freebcshw,
++      .reinitbcshw = gigaset_reinitbcshw,
++      .initcshw = gigaset_initcshw,
++      .freecshw = gigaset_freecshw,
++      .set_modem_ctrl = gigaset_set_modem_ctrl,
++      .baud_rate = gigaset_baud_rate,
++      .set_line_ctrl = gigaset_set_line_ctrl,
++      .send_skb = gigaset_isoc_send_skb,
++      .handle_input = gigaset_isoc_input,
+ };
+ /* bas_gigaset_init
+diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
+index 600c79b..3752bab 100644
+--- a/drivers/isdn/gigaset/interface.c
++++ b/drivers/isdn/gigaset/interface.c
+@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
+       }
+       tty->driver_data = cs;
+-      ++cs->port.count;
++      atomic_inc(&cs->port.count);
+-      if (cs->port.count == 1) {
++      if (atomic_read(&cs->port.count) == 1) {
+               tty_port_tty_set(&cs->port, tty);
+               cs->port.low_latency = 1;
+       }
+@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
+       if (!cs->connected)
+               gig_dbg(DEBUG_IF, "not connected");     /* nothing to do */
+-      else if (!cs->port.count)
++      else if (!atomic_read(&cs->port.count))
+               dev_warn(cs->dev, "%s: device not opened\n", __func__);
+-      else if (!--cs->port.count)
++      else if (!atomic_dec_return(&cs->port.count))
+               tty_port_tty_set(&cs->port, NULL);
+       mutex_unlock(&cs->mutex);
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index d1f8ab9..c0412f2 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -445,22 +445,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
+ }
+ static const struct gigaset_ops ops = {
+-      gigaset_write_cmd,
+-      gigaset_write_room,
+-      gigaset_chars_in_buffer,
+-      gigaset_brkchars,
+-      gigaset_init_bchannel,
+-      gigaset_close_bchannel,
+-      gigaset_initbcshw,
+-      gigaset_freebcshw,
+-      gigaset_reinitbcshw,
+-      gigaset_initcshw,
+-      gigaset_freecshw,
+-      gigaset_set_modem_ctrl,
+-      gigaset_baud_rate,
+-      gigaset_set_line_ctrl,
+-      gigaset_m10x_send_skb,  /* asyncdata.c */
+-      gigaset_m10x_input,     /* asyncdata.c */
++      .write_cmd = gigaset_write_cmd,
++      .write_room = gigaset_write_room,
++      .chars_in_buffer = gigaset_chars_in_buffer,
++      .brkchars = gigaset_brkchars,
++      .init_bchannel = gigaset_init_bchannel,
++      .close_bchannel = gigaset_close_bchannel,
++      .initbcshw = gigaset_initbcshw,
++      .freebcshw = gigaset_freebcshw,
++      .reinitbcshw = gigaset_reinitbcshw,
++      .initcshw = gigaset_initcshw,
++      .freecshw = gigaset_freecshw,
++      .set_modem_ctrl = gigaset_set_modem_ctrl,
++      .baud_rate = gigaset_baud_rate,
++      .set_line_ctrl = gigaset_set_line_ctrl,
++      .send_skb = gigaset_m10x_send_skb,      /* asyncdata.c */
++      .handle_input = gigaset_m10x_input,     /* asyncdata.c */
+ };
+diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
+index 5f306e2..ff14829 100644
+--- a/drivers/isdn/gigaset/usb-gigaset.c
++++ b/drivers/isdn/gigaset/usb-gigaset.c
+@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+       gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
+       memcpy(cs->hw.usb->bchars, buf, 6);
+       return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
+-                             0, 0, &buf, 6, 2000);
++                             0, 0, cs->hw.usb->bchars, 6, 2000);
+ }
+ static void gigaset_freebcshw(struct bc_state *bcs)
+@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
+ }
+ static const struct gigaset_ops ops = {
+-      gigaset_write_cmd,
+-      gigaset_write_room,
+-      gigaset_chars_in_buffer,
+-      gigaset_brkchars,
+-      gigaset_init_bchannel,
+-      gigaset_close_bchannel,
+-      gigaset_initbcshw,
+-      gigaset_freebcshw,
+-      gigaset_reinitbcshw,
+-      gigaset_initcshw,
+-      gigaset_freecshw,
+-      gigaset_set_modem_ctrl,
+-      gigaset_baud_rate,
+-      gigaset_set_line_ctrl,
+-      gigaset_m10x_send_skb,
+-      gigaset_m10x_input,
++      .write_cmd = gigaset_write_cmd,
++      .write_room = gigaset_write_room,
++      .chars_in_buffer = gigaset_chars_in_buffer,
++      .brkchars = gigaset_brkchars,
++      .init_bchannel = gigaset_init_bchannel,
++      .close_bchannel = gigaset_close_bchannel,
++      .initbcshw = gigaset_initbcshw,
++      .freebcshw = gigaset_freebcshw,
++      .reinitbcshw = gigaset_reinitbcshw,
++      .initcshw = gigaset_initcshw,
++      .freecshw = gigaset_freecshw,
++      .set_modem_ctrl = gigaset_set_modem_ctrl,
++      .baud_rate = gigaset_baud_rate,
++      .set_line_ctrl = gigaset_set_line_ctrl,
++      .send_skb = gigaset_m10x_send_skb,
++      .handle_input = gigaset_m10x_input,
+ };
+ /*
+diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
+index 4d9b195..455075c 100644
+--- a/drivers/isdn/hardware/avm/b1.c
++++ b/drivers/isdn/hardware/avm/b1.c
+@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
+       }
+       if (left) {
+               if (t4file->user) {
+-                      if (copy_from_user(buf, dp, left))
++                      if (left > sizeof buf || copy_from_user(buf, dp, left))
+                               return -EFAULT;
+               } else {
+                       memcpy(buf, dp, left);
+@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
+       }
+       if (left) {
+               if (config->user) {
+-                      if (copy_from_user(buf, dp, left))
++                      if (left > sizeof buf || copy_from_user(buf, dp, left))
+                               return -EFAULT;
+               } else {
+                       memcpy(buf, dp, left);
+diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
+index 7a0bdbd..0a7b7db 100644
+--- a/drivers/isdn/hardware/eicon/capifunc.c
++++ b/drivers/isdn/hardware/eicon/capifunc.c
+@@ -57,7 +57,7 @@ static u16 diva_send_message(struct capi_ctr *,
+                            diva_os_message_buffer_s *);
+ extern void diva_os_set_controller_struct(struct capi_ctr *);
+-extern void DIVA_DIDD_Read(DESCRIPTOR *, int);
++extern void DIVA_DIDD_Read(void *, int);
+ /*
+  * debug
+@@ -1032,7 +1032,6 @@ static void didd_callback(void *context, DESCRIPTOR *adapter, int removal)
+                       stop_dbg();
+               } else {
+                       memcpy(&MAdapter, adapter, sizeof(MAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
+               }
+       } else if ((adapter->type > 0) && (adapter->type < 16)) {       /* IDI Adapter */
+@@ -1060,7 +1059,6 @@ static int divacapi_connect_didd(void)
+       for (x = 0; x < MAX_DESCRIPTORS; x++) {
+               if (DIDD_Table[x].type == IDI_DIMAINT) {        /* MAINT found */
+                       memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
+                       break;
+               }
+@@ -1072,7 +1070,7 @@ static int divacapi_connect_didd(void)
+                       req.didd_notify.e.Req = 0;
+                       req.didd_notify.e.Rc =
+                               IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
+-                      req.didd_notify.info.callback = (void *)didd_callback;
++                      req.didd_notify.info.callback = didd_callback;
+                       req.didd_notify.info.context = NULL;
+                       DAdapter.request((ENTITY *)&req);
+                       if (req.didd_notify.e.Rc != 0xff) {
+diff --git a/drivers/isdn/hardware/eicon/dadapter.c b/drivers/isdn/hardware/eicon/dadapter.c
+index 5142099..642b7de 100644
+--- a/drivers/isdn/hardware/eicon/dadapter.c
++++ b/drivers/isdn/hardware/eicon/dadapter.c
+@@ -63,10 +63,14 @@ static void no_printf(unsigned char *format, ...)
+    ------------------------------------------------------------------------- */
+ #include "debuglib.c"
++static void IDI_CALL_LINK_T no_request(ENTITY IDI_CALL_ENTITY_T *i)
++{
++}
++
+ static DESCRIPTOR  MAdapter =  {IDI_DIMAINT, /* Adapter Type */
+                               0x00,     /* Channels */
+                               0x0000,    /* Features */
+-                              (IDI_CALL)no_printf};
++                              no_request};
+ /* --------------------------------------------------------------------------
+    DAdapter. Only IDI clients with buffer, that is huge enough to
+    get all descriptors will receive information about DAdapter
+@@ -100,6 +104,11 @@ void diva_didd_load_time_init(void) {
+ void diva_didd_load_time_finit(void) {
+       diva_os_destroy_spin_lock(&didd_spin, "didd");
+ }
++
++static void diva_didd_no_request(ENTITY *e)
++{
++}
++
+ /* --------------------------------------------------------------------------
+    Called in order to register new adapter in adapter array
+    return adapter handle (> 0) on success
+@@ -111,13 +120,12 @@ static int diva_didd_add_descriptor(DESCRIPTOR *d) {
+       if (d->type == IDI_DIMAINT) {
+               if (d->request) {
+                       MAdapter.request = d->request;
+-                      dprintf = (DIVA_DI_PRINTF)d->request;
+                       diva_notify_adapter_change(&MAdapter, 0); /* Inserted */
+                       DBG_TRC(("DIMAINT registered, dprintf=%08x", d->request))
+                               } else {
+                       DBG_TRC(("DIMAINT removed"))
+                               diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
+-                      MAdapter.request = (IDI_CALL)no_printf;
++                      MAdapter.request = diva_didd_no_request;
+                       dprintf = no_printf;
+               }
+               return (NEW_MAX_DESCRIPTORS);
+@@ -149,7 +157,7 @@ static int diva_didd_remove_descriptor(IDI_CALL request) {
+               DBG_TRC(("DIMAINT removed"))
+                       dprintf = no_printf;
+               diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
+-              MAdapter.request = (IDI_CALL)no_printf;
++              MAdapter.request = diva_didd_no_request;
+               return (0);
+       }
+       for (i = 0; (Adapters && (i < NEW_MAX_DESCRIPTORS)); i++) {
+@@ -222,7 +230,7 @@ static void IDI_CALL_LINK_T diva_dadapter_request( \
+       case IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY: {
+               diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
+               pinfo->handle = diva_register_adapter_callback(         \
+-                      (didd_adapter_change_callback_t)pinfo->callback,
++                      pinfo->callback,
+                       (void IDI_CALL_ENTITY_T *)pinfo->context);
+               e->Rc = 0xff;
+       } break;
+diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
+index b0b23ed..e3d4e18 100644
+--- a/drivers/isdn/hardware/eicon/diddfunc.c
++++ b/drivers/isdn/hardware/eicon/diddfunc.c
+@@ -28,12 +28,12 @@ static DESCRIPTOR _DAdapter;
+ /*
+  * didd callback function
+  */
+-static void *didd_callback(void *context, DESCRIPTOR *adapter,
++static void didd_callback(void *context, DESCRIPTOR *adapter,
+                          int removal)
+ {
+       if (adapter->type == IDI_DADAPTER) {
+               DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."))
+-                      return (NULL);
++                      return;
+       } else if (adapter->type == IDI_DIMAINT) {
+               if (removal) {
+                       DbgDeregister();
+@@ -41,7 +41,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
+                       DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
+               }
+       }
+-      return (NULL);
+ }
+ /*
+@@ -63,7 +62,7 @@ static int __init connect_didd(void)
+                       req.didd_notify.e.Req = 0;
+                       req.didd_notify.e.Rc =
+                               IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
+-                      req.didd_notify.info.callback = (void *)didd_callback;
++                      req.didd_notify.info.callback = didd_callback;
+                       req.didd_notify.info.context = NULL;
+                       _DAdapter.request((ENTITY *)&req);
+                       if (req.didd_notify.e.Rc != 0xff)
+diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
+index 4be5f88..1dbd479 100644
+--- a/drivers/isdn/hardware/eicon/divasfunc.c
++++ b/drivers/isdn/hardware/eicon/divasfunc.c
+@@ -130,12 +130,12 @@ static void stop_dbg(void)
+ /*
+  * didd callback function
+  */
+-static void *didd_callback(void *context, DESCRIPTOR *adapter,
++static void didd_callback(void *context, DESCRIPTOR *adapter,
+                          int removal)
+ {
+       if (adapter->type == IDI_DADAPTER) {
+               DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
+-              return (NULL);
++              return;
+       }
+       if (adapter->type == IDI_DIMAINT) {
+@@ -143,11 +143,9 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
+                       stop_dbg();
+               } else {
+                       memcpy(&MAdapter, adapter, sizeof(MAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       start_dbg();
+               }
+       }
+-      return (NULL);
+ }
+ /*
+@@ -169,7 +167,7 @@ static int __init connect_didd(void)
+                       req.didd_notify.e.Req = 0;
+                       req.didd_notify.e.Rc =
+                               IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
+-                      req.didd_notify.info.callback = (void *)didd_callback;
++                      req.didd_notify.info.callback = didd_callback;
+                       req.didd_notify.info.context = NULL;
+                       DAdapter.request((ENTITY *)&req);
+                       if (req.didd_notify.e.Rc != 0xff) {
+@@ -179,7 +177,6 @@ static int __init connect_didd(void)
+                       notify_handle = req.didd_notify.info.handle;
+               } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
+                       memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       start_dbg();
+               }
+       }
+diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
+index dd6b53a..42661f6 100644
+--- a/drivers/isdn/hardware/eicon/divasync.h
++++ b/drivers/isdn/hardware/eicon/divasync.h
+@@ -138,7 +138,7 @@ typedef struct _diva_xdi_dma_descriptor_operation {
+ #define IDI_SYNC_REQ_DIDD_GET_CFG_LIB_IFC           0x10
+ typedef struct _diva_didd_adapter_notify {
+       dword handle; /* Notification handle */
+-      void *callback;
++      didd_adapter_change_callback_t callback;
+       void *context;
+ } diva_didd_adapter_notify_t;
+ typedef struct _diva_didd_add_adapter {
+diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
+index fef6586..22353ff 100644
+--- a/drivers/isdn/hardware/eicon/idifunc.c
++++ b/drivers/isdn/hardware/eicon/idifunc.c
+@@ -154,18 +154,17 @@ rescan:
+ /*
+  * DIDD notify callback
+  */
+-static void *didd_callback(void *context, DESCRIPTOR *adapter,
++static void didd_callback(void *context, DESCRIPTOR *adapter,
+                          int removal)
+ {
+       if (adapter->type == IDI_DADAPTER) {
+               DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
+-              return (NULL);
++              return;
+       } else if (adapter->type == IDI_DIMAINT) {
+               if (removal) {
+                       stop_dbg();
+               } else {
+                       memcpy(&MAdapter, adapter, sizeof(MAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT);
+               }
+       } else if ((adapter->type > 0) && (adapter->type < 16)) {       /* IDI Adapter */
+@@ -175,7 +174,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
+                       um_new_card(adapter);
+               }
+       }
+-      return (NULL);
+ }
+ /*
+@@ -197,7 +195,7 @@ static int __init connect_didd(void)
+                       req.didd_notify.e.Req = 0;
+                       req.didd_notify.e.Rc =
+                               IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
+-                      req.didd_notify.info.callback = (void *)didd_callback;
++                      req.didd_notify.info.callback = didd_callback;
+                       req.didd_notify.info.context = NULL;
+                       DAdapter.request((ENTITY *)&req);
+                       if (req.didd_notify.e.Rc != 0xff) {
+@@ -207,7 +205,6 @@ static int __init connect_didd(void)
+                       notify_handle = req.didd_notify.info.handle;
+               } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
+                       memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT);
+               } else if ((DIDD_Table[x].type > 0)
+                          && (DIDD_Table[x].type < 16)) {      /* IDI Adapter found */
+diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
+index 1cd9aff..3775d52 100644
+--- a/drivers/isdn/hardware/eicon/mntfunc.c
++++ b/drivers/isdn/hardware/eicon/mntfunc.c
+@@ -26,8 +26,13 @@ extern void DIVA_DIDD_Read(void *, int);
+ static dword notify_handle;
+ static DESCRIPTOR DAdapter;
+ static DESCRIPTOR MAdapter;
++
++static void didd_nothing(ENTITY IDI_CALL_ENTITY_T *e)
++{
++      diva_maint_prtComp((char *)e);
++}
+ static DESCRIPTOR MaintDescriptor =
+-{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp };
++{ IDI_DIMAINT, 0, 0, didd_nothing };
+ extern int diva_os_copy_to_user(void *os_handle, void __user *dst,
+                               const void *src, int length);
+@@ -44,7 +49,7 @@ static void no_printf(unsigned char *x, ...)
+ /*
+  *  DIDD callback function
+  */
+-static void *didd_callback(void *context, DESCRIPTOR *adapter,
++static void didd_callback(void *context, DESCRIPTOR *adapter,
+                          int removal)
+ {
+       if (adapter->type == IDI_DADAPTER) {
+@@ -56,7 +61,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
+                       dprintf = no_printf;
+               } else {
+                       memcpy(&MAdapter, adapter, sizeof(MAdapter));
+-                      dprintf = (DIVA_DI_PRINTF) MAdapter.request;
+                       DbgRegister("MAINT", DRIVERRELEASE_MNT, DBG_DEFAULT);
+               }
+       } else if ((adapter->type > 0) && (adapter->type < 16)) {
+@@ -66,7 +70,6 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
+                       diva_mnt_add_xdi_adapter(adapter);
+               }
+       }
+-      return (NULL);
+ }
+ /*
+@@ -88,7 +91,7 @@ static int __init connect_didd(void)
+                       req.didd_notify.e.Req = 0;
+                       req.didd_notify.e.Rc =
+                               IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
+-                      req.didd_notify.info.callback = (void *)didd_callback;
++                      req.didd_notify.info.callback = didd_callback;
+                       req.didd_notify.info.context = NULL;
+                       DAdapter.request((ENTITY *)&req);
+                       if (req.didd_notify.e.Rc != 0xff)
+diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
+index 292991c..f36f4cb 100644
+--- a/drivers/isdn/hardware/mISDN/avmfritz.c
++++ b/drivers/isdn/hardware/mISDN/avmfritz.c
+@@ -156,7 +156,7 @@ _set_debug(struct fritzcard *card)
+ }
+ static int
+-set_debug(const char *val, struct kernel_param *kp)
++set_debug(const char *val, const struct kernel_param *kp)
+ {
+       int ret;
+       struct fritzcard *card;
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
+index 28543d7..bd8cf91 100644
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -2856,8 +2856,9 @@ irq_notforus:
+  */
+ static void
+-hfcmulti_dbusy_timer(struct hfc_multi *hc)
++hfcmulti_dbusy_timer(unsigned long _hc)
+ {
++      //struct hfc_multi *hc = (struct hfc_multi *)_hc;
+ }
+@@ -3878,7 +3879,7 @@ hfcmulti_initmode(struct dchannel *dch)
+               if (hc->dnum[pt]) {
+                       mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
+                                     -1, 0, -1, 0);
+-                      dch->timer.function = (void *) hfcmulti_dbusy_timer;
++                      dch->timer.function = hfcmulti_dbusy_timer;
+                       dch->timer.data = (long) dch;
+                       init_timer(&dch->timer);
+               }
+@@ -3986,7 +3987,7 @@ hfcmulti_initmode(struct dchannel *dch)
+               hc->chan[i].slot_rx = -1;
+               hc->chan[i].conf = -1;
+               mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
+-              dch->timer.function = (void *) hfcmulti_dbusy_timer;
++              dch->timer.function = hfcmulti_dbusy_timer;
+               dch->timer.data = (long) dch;
+               init_timer(&dch->timer);
+               hc->chan[i - 2].slot_tx = -1;
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index ff48da6..497fb7b 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc)
+  * Timer function called when kernel timer expires
+  */
+ static void
+-hfcpci_Timer(struct hfc_pci *hc)
++hfcpci_Timer(unsigned long _hc)
+ {
++      struct hfc_pci *hc = (struct hfc_pci *)_hc;
+       hc->hw.timer.expires = jiffies + 75;
+       /* WD RESET */
+ /*
+@@ -1241,8 +1242,9 @@ hfcpci_int(int intno, void *dev_id)
+  * timer callback for D-chan busy resolution. Currently no function
+  */
+ static void
+-hfcpci_dbusy_timer(struct hfc_pci *hc)
++hfcpci_dbusy_timer(unsigned long _hc)
+ {
++//    struct hfc_pci *hc = (struct hfc_pci *)_hc;
+ }
+ /*
+@@ -1717,7 +1719,7 @@ static void
+ inithfcpci(struct hfc_pci *hc)
+ {
+       printk(KERN_DEBUG "inithfcpci: entered\n");
+-      hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
++      hc->dch.timer.function = hfcpci_dbusy_timer;
+       hc->dch.timer.data = (long) &hc->dch;
+       init_timer(&hc->dch.timer);
+       hc->chanlimit = 2;
+@@ -2044,7 +2046,7 @@ setup_hw(struct hfc_pci *hc)
+       Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
+       /* At this point the needed PCI config is done */
+       /* fifos are still not enabled */
+-      hc->hw.timer.function = (void *) hfcpci_Timer;
++      hc->hw.timer.function = hfcpci_Timer;
+       hc->hw.timer.data = (long) hc;
+       init_timer(&hc->hw.timer);
+       /* default PCM master */
+@@ -2293,9 +2295,9 @@ _hfcpci_softirq(struct device *dev, void *arg)
+ }
+ static void
+-hfcpci_softirq(void *arg)
++hfcpci_softirq(unsigned long arg)
+ {
+-      WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
++      WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, (void *)arg,
+                                     _hfcpci_softirq) != 0);
+       /* if next event would be in the past ... */
+@@ -2330,7 +2332,7 @@ HFC_init(void)
+       if (poll != HFCPCI_BTRANS_THRESHOLD) {
+               printk(KERN_INFO "%s: Using alternative poll value of %d\n",
+                      __func__, poll);
+-              hfc_tl.function = (void *)hfcpci_softirq;
++              hfc_tl.function = hfcpci_softirq;
+               hfc_tl.data = 0;
+               init_timer(&hfc_tl);
+               hfc_tl.expires = jiffies + tics;
+diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+index d5bdbaf..a7cdc61 100644
+--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
++++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+@@ -244,7 +244,7 @@ _set_debug(struct inf_hw *card)
+ }
+ static int
+-set_debug(const char *val, struct kernel_param *kp)
++set_debug(const char *val, const struct kernel_param *kp)
+ {
+       int ret;
+       struct inf_hw *card;
+@@ -586,9 +586,10 @@ reset_inf(struct inf_hw *hw)
+ }
+ static int
+-inf_ctrl(struct inf_hw *hw, u32 cmd, u_long arg)
++inf_ctrl(struct ipac_hw *_hw, u32 cmd, u_long arg)
+ {
+       int ret = 0;
++      struct inf_hw *hw = container_of(_hw, struct inf_hw, ipac);
+       switch (cmd) {
+       case HW_RESET_REQ:
+@@ -915,7 +916,7 @@ setup_instance(struct inf_hw *card)
+       spin_lock_init(&card->lock);
+       card->ipac.isac.hwlock = &card->lock;
+       card->ipac.hwlock = &card->lock;
+-      card->ipac.ctrl = (void *)&inf_ctrl;
++      card->ipac.ctrl = &inf_ctrl;
+       err = setup_io(card);
+       if (err)
+diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
+index aa9b6c3..ffd3257 100644
+--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
+@@ -727,8 +727,9 @@ isac_release(struct isac_hw *isac)
+ }
+ static void
+-dbusy_timer_handler(struct isac_hw *isac)
++dbusy_timer_handler(unsigned long _isac)
+ {
++      struct isac_hw *isac = (struct isac_hw *)_isac;
+       int rbch, star;
+       u_long flags;
+@@ -796,7 +797,7 @@ isac_init(struct isac_hw *isac)
+       }
+       isac->mon_tx = NULL;
+       isac->mon_rx = NULL;
+-      isac->dch.timer.function = (void *) dbusy_timer_handler;
++      isac->dch.timer.function = dbusy_timer_handler;
+       isac->dch.timer.data = (long)isac;
+       init_timer(&isac->dch.timer);
+       isac->mocr = 0xaa;
+diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
+index afde4ed..e9fcae4 100644
+--- a/drivers/isdn/hardware/mISDN/netjet.c
++++ b/drivers/isdn/hardware/mISDN/netjet.c
+@@ -111,7 +111,7 @@ _set_debug(struct tiger_hw *card)
+ }
+ static int
+-set_debug(const char *val, struct kernel_param *kp)
++set_debug(const char *val, const struct kernel_param *kp)
+ {
+       int ret;
+       struct tiger_hw *card;
+diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
+index 9815bb4..3d6181e 100644
+--- a/drivers/isdn/hardware/mISDN/speedfax.c
++++ b/drivers/isdn/hardware/mISDN/speedfax.c
+@@ -94,7 +94,7 @@ _set_debug(struct sfax_hw *card)
+ }
+ static int
+-set_debug(const char *val, struct kernel_param *kp)
++set_debug(const char *val, const struct kernel_param *kp)
+ {
+       int ret;
+       struct sfax_hw *card;
+@@ -186,9 +186,10 @@ reset_speedfax(struct sfax_hw *sf)
+ }
+ static int
+-sfax_ctrl(struct sfax_hw  *sf, u32 cmd, u_long arg)
++sfax_ctrl(void *_sf, u32 cmd, u_long arg)
+ {
+       int ret = 0;
++      struct sfax_hw *sf = (struct sfax_hw *)_sf;
+       switch (cmd) {
+       case HW_RESET_REQ:
+@@ -386,7 +387,7 @@ setup_instance(struct sfax_hw *card)
+       spin_lock_init(&card->lock);
+       card->isac.hwlock = &card->lock;
+       card->isar.hwlock = &card->lock;
+-      card->isar.ctrl = (void *)&sfax_ctrl;
++      card->isar.ctrl = &sfax_ctrl;
+       card->isac.name = card->name;
+       card->isar.name = card->name;
+       card->isar.owner = THIS_MODULE;
+diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
+index 7416755..2914e7c 100644
+--- a/drivers/isdn/hardware/mISDN/w6692.c
++++ b/drivers/isdn/hardware/mISDN/w6692.c
+@@ -101,7 +101,7 @@ _set_debug(struct w6692_hw *card)
+ }
+ static int
+-set_debug(const char *val, struct kernel_param *kp)
++set_debug(const char *val, const struct kernel_param *kp)
+ {
+       int ret;
+       struct w6692_hw *card;
+@@ -819,8 +819,9 @@ w6692_irq(int intno, void *dev_id)
+ }
+ static void
+-dbusy_timer_handler(struct dchannel *dch)
++dbusy_timer_handler(unsigned long _dch)
+ {
++      struct dchannel *dch = (struct dchannel *)_dch;
+       struct w6692_hw *card = dch->hw;
+       int             rbch, star;
+       u_long          flags;
+@@ -852,7 +853,7 @@ void initW6692(struct w6692_hw *card)
+ {
+       u8      val;
+-      card->dch.timer.function = (void *)dbusy_timer_handler;
++      card->dch.timer.function = dbusy_timer_handler;
+       card->dch.timer.data = (u_long)&card->dch;
+       init_timer(&card->dch.timer);
+       w6692_mode(&card->bc[0], ISDN_P_NONE);
+diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
+index 36817e0..b02bb98 100644
+--- a/drivers/isdn/hisax/amd7930_fn.c
++++ b/drivers/isdn/hisax/amd7930_fn.c
+@@ -685,8 +685,9 @@ DC_Close_Amd7930(struct IsdnCardState *cs) {
+ static void
+-dbusy_timer_handler(struct IsdnCardState *cs)
++dbusy_timer_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       u_long flags;
+       struct PStack *stptr;
+       WORD dtcr, der;
+@@ -789,7 +790,7 @@ void Amd7930_init(struct IsdnCardState *cs)
+ void setup_Amd7930(struct IsdnCardState *cs)
+ {
+       INIT_WORK(&cs->tqueue, Amd7930_bh);
+-      cs->dbusytimer.function = (void *) dbusy_timer_handler;
++      cs->dbusytimer.function = dbusy_timer_handler;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+ }
+diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c
+index 29ec2df..9c7123c 100644
+--- a/drivers/isdn/hisax/arcofi.c
++++ b/drivers/isdn/hisax/arcofi.c
+@@ -112,7 +112,8 @@ arcofi_fsm(struct IsdnCardState *cs, int event, void *data) {
+ }
+ static void
+-arcofi_timer(struct IsdnCardState *cs) {
++arcofi_timer(unsigned long _cs) {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       arcofi_fsm(cs, ARCOFI_TIMEOUT, NULL);
+ }
+@@ -125,7 +126,7 @@ clear_arcofi(struct IsdnCardState *cs) {
+ void
+ init_arcofi(struct IsdnCardState *cs) {
+-      cs->dc.isac.arcofitimer.function = (void *) arcofi_timer;
++      cs->dc.isac.arcofitimer.function = arcofi_timer;
+       cs->dc.isac.arcofitimer.data = (long) cs;
+       init_timer(&cs->dc.isac.arcofitimer);
+       init_waitqueue_head(&cs->dc.isac.arcofi_wait);
+diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
+index bf04d2a..a7d53c9 100644
+--- a/drivers/isdn/hisax/config.c
++++ b/drivers/isdn/hisax/config.c
+@@ -659,7 +659,7 @@ int jiftime(char *s, long mark)
+ static u_char tmpbuf[HISAX_STATUS_BUFSIZE];
+-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
++void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt,
+                     va_list args)
+ {
+       /* if head == NULL the fmt contains the full info */
+@@ -729,7 +729,7 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
+       }
+ }
+-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...)
++void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...)
+ {
+       va_list args;
+diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
+index 4fc90de..fda68cd 100644
+--- a/drivers/isdn/hisax/diva.c
++++ b/drivers/isdn/hisax/diva.c
+@@ -796,8 +796,9 @@ reset_diva(struct IsdnCardState *cs)
+ #define DIVA_ASSIGN 1
+ static void
+-diva_led_handler(struct IsdnCardState *cs)
++diva_led_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       int blink = 0;
+       if ((cs->subtyp == DIVA_IPAC_ISA) ||
+@@ -898,7 +899,7 @@ Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg)
+           (cs->subtyp != DIVA_IPAC_PCI) &&
+           (cs->subtyp != DIVA_IPACX_PCI)) {
+               spin_lock_irqsave(&cs->lock, flags);
+-              diva_led_handler(cs);
++              diva_led_handler((unsigned long)cs);
+               spin_unlock_irqrestore(&cs->lock, flags);
+       }
+       return (0);
+@@ -976,7 +977,7 @@ static int setup_diva_common(struct IsdnCardState *cs)
+               printk(KERN_INFO "Diva: IPACX Design Id: %x\n",
+                      MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F);
+       } else { /* DIVA 2.0 */
+-              cs->hw.diva.tl.function = (void *) diva_led_handler;
++              cs->hw.diva.tl.function = diva_led_handler;
+               cs->hw.diva.tl.data = (long) cs;
+               init_timer(&cs->hw.diva.tl);
+               cs->readisac  = &ReadISAC;
+diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
+index d8ef64d..9c50267 100644
+--- a/drivers/isdn/hisax/elsa.c
++++ b/drivers/isdn/hisax/elsa.c
+@@ -606,8 +606,9 @@ check_arcofi(struct IsdnCardState *cs)
+ #endif /* ARCOFI_USE */
+ static void
+-elsa_led_handler(struct IsdnCardState *cs)
++elsa_led_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       int blink = 0;
+       if (cs->subtyp == ELSA_PCMCIA || cs->subtyp == ELSA_PCMCIA_IPAC)
+@@ -715,7 +716,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
+                       init_modem(cs);
+               }
+ #endif
+-              elsa_led_handler(cs);
++              elsa_led_handler((unsigned long)cs);
+               return (ret);
+       case (MDL_REMOVE | REQUEST):
+               cs->hw.elsa.status &= 0;
+@@ -767,7 +768,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
+               else
+                       cs->hw.elsa.status &= ~ELSA_BAD_PWR;
+       }
+-      elsa_led_handler(cs);
++      elsa_led_handler((unsigned long)cs);
+       return (ret);
+ }
+@@ -1147,7 +1148,7 @@ static int setup_elsa_common(struct IsdnCard *card)
+       init_arcofi(cs);
+ #endif
+       setup_isac(cs);
+-      cs->hw.elsa.tl.function = (void *) elsa_led_handler;
++      cs->hw.elsa.tl.function = elsa_led_handler;
+       cs->hw.elsa.tl.data = (long) cs;
+       init_timer(&cs->hw.elsa.tl);
+       /* Teste Timer */
+diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
+index c7a9471..5409bd3 100644
+--- a/drivers/isdn/hisax/fsm.c
++++ b/drivers/isdn/hisax/fsm.c
+@@ -85,8 +85,9 @@ FsmChangeState(struct FsmInst *fi, int newstate)
+ }
+ static void
+-FsmExpireTimer(struct FsmTimer *ft)
++FsmExpireTimer(unsigned long _ft)
+ {
++      struct FsmTimer *ft = (struct FsmTimer *)_ft;
+ #if FSM_TIMER_DEBUG
+       if (ft->fi->debug)
+               ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
+@@ -98,7 +99,7 @@ void
+ FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
+ {
+       ft->fi = fi;
+-      ft->tl.function = (void *) FsmExpireTimer;
++      ft->tl.function = FsmExpireTimer;
+       ft->tl.data = (long) ft;
+ #if FSM_TIMER_DEBUG
+       if (ft->fi->debug)
+diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
+index 9600cd7..86ca5a3 100644
+--- a/drivers/isdn/hisax/hfc4s8s_l1.c
++++ b/drivers/isdn/hisax/hfc4s8s_l1.c
+@@ -299,8 +299,9 @@ Read_hfc16_stable(hfc4s8s_hw *hw, int reg)
+ /* D-channel call from HiSax */
+ /*****************************/
+ static void
+-dch_l2l1(struct hisax_d_if *iface, int pr, void *arg)
++dch_l2l1(struct hisax_if *_iface, int pr, void *arg)
+ {
++      struct hisax_d_if *iface = container_of(_iface, struct hisax_d_if, ifc);
+       struct hfc4s8s_l1 *l1 = iface->ifc.priv;
+       struct sk_buff *skb = (struct sk_buff *) arg;
+       u_long flags;
+@@ -591,8 +592,9 @@ bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
+ /* layer 1 timer function */
+ /**************************/
+ static void
+-hfc_l1_timer(struct hfc4s8s_l1 *l1)
++hfc_l1_timer(unsigned long _l1)
+ {
++      struct hfc4s8s_l1 *l1 = (struct hfc4s8s_l1 *)_l1;
+       u_long flags;
+       if (!l1->enabled)
+@@ -1396,16 +1398,16 @@ setup_instance(hfc4s8s_hw *hw)
+               l1p = hw->l1 + i;
+               spin_lock_init(&l1p->lock);
+               l1p->hw = hw;
+-              l1p->l1_timer.function = (void *) hfc_l1_timer;
++              l1p->l1_timer.function = hfc_l1_timer;
+               l1p->l1_timer.data = (long) (l1p);
+               init_timer(&l1p->l1_timer);
+               l1p->st_num = i;
+               skb_queue_head_init(&l1p->d_tx_queue);
+               l1p->d_if.ifc.priv = hw->l1 + i;
+-              l1p->d_if.ifc.l2l1 = (void *) dch_l2l1;
++              l1p->d_if.ifc.l2l1 = dch_l2l1;
+               spin_lock_init(&l1p->b_ch[0].lock);
+-              l1p->b_ch[0].b_if.ifc.l2l1 = (void *) bch_l2l1;
++              l1p->b_ch[0].b_if.ifc.l2l1 = bch_l2l1;
+               l1p->b_ch[0].b_if.ifc.priv = (void *) &l1p->b_ch[0];
+               l1p->b_ch[0].l1p = hw->l1 + i;
+               l1p->b_ch[0].bchan = 1;
+@@ -1413,7 +1415,7 @@ setup_instance(hfc4s8s_hw *hw)
+               skb_queue_head_init(&l1p->b_ch[0].tx_queue);
+               spin_lock_init(&l1p->b_ch[1].lock);
+-              l1p->b_ch[1].b_if.ifc.l2l1 = (void *) bch_l2l1;
++              l1p->b_ch[1].b_if.ifc.l2l1 = bch_l2l1;
+               l1p->b_ch[1].b_if.ifc.priv = (void *) &l1p->b_ch[1];
+               l1p->b_ch[1].l1p = hw->l1 + i;
+               l1p->b_ch[1].bchan = 2;
+diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
+index a756e5c..e4789ba 100644
+--- a/drivers/isdn/hisax/hfc_2bds0.c
++++ b/drivers/isdn/hisax/hfc_2bds0.c
+@@ -1014,7 +1014,7 @@ setstack_hfcd(struct PStack *st, struct IsdnCardState *cs)
+ }
+ static void
+-hfc_dbusy_timer(struct IsdnCardState *cs)
++hfc_dbusy_timer(unsigned long _cs)
+ {
+ }
+@@ -1073,7 +1073,7 @@ set_cs_func(struct IsdnCardState *cs)
+       cs->writeisacfifo = &dummyf;
+       cs->BC_Read_Reg = &ReadReg;
+       cs->BC_Write_Reg = &WriteReg;
+-      cs->dbusytimer.function = (void *) hfc_dbusy_timer;
++      cs->dbusytimer.function = hfc_dbusy_timer;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+       INIT_WORK(&cs->tqueue, hfcd_bh);
+diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
+index 90449e1..9a5394c 100644
+--- a/drivers/isdn/hisax/hfc_pci.c
++++ b/drivers/isdn/hisax/hfc_pci.c
+@@ -165,8 +165,9 @@ reset_hfcpci(struct IsdnCardState *cs)
+ /* Timer function called when kernel timer expires */
+ /***************************************************/
+ static void
+-hfcpci_Timer(struct IsdnCardState *cs)
++hfcpci_Timer(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       cs->hw.hfcpci.timer.expires = jiffies + 75;
+       /* WD RESET */
+ /*      WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
+@@ -1095,8 +1096,9 @@ hfcpci_interrupt(int intno, void *dev_id)
+ /* timer callback for D-chan busy resolution. Currently no function */
+ /********************************************************************/
+ static void
+-hfcpci_dbusy_timer(struct IsdnCardState *cs)
++hfcpci_dbusy_timer(unsigned long _cs)
+ {
++      //struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+ }
+ /*************************************/
+@@ -1582,7 +1584,7 @@ inithfcpci(struct IsdnCardState *cs)
+       cs->bcs[1].BC_SetStack = setstack_2b;
+       cs->bcs[0].BC_Close = close_hfcpci;
+       cs->bcs[1].BC_Close = close_hfcpci;
+-      cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
++      cs->dbusytimer.function = hfcpci_dbusy_timer;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+       mode_hfcpci(cs->bcs, 0, 0);
+@@ -1746,7 +1748,7 @@ setup_hfcpci(struct IsdnCard *card)
+       cs->BC_Write_Reg = NULL;
+       cs->irq_func = &hfcpci_interrupt;
+       cs->irq_flags |= IRQF_SHARED;
+-      cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
++      cs->hw.hfcpci.timer.function = hfcpci_Timer;
+       cs->hw.hfcpci.timer.data = (long) cs;
+       init_timer(&cs->hw.hfcpci.timer);
+       cs->cardmsg = &hfcpci_card_msg;
+diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
+index 13b2151..d3e0732 100644
+--- a/drivers/isdn/hisax/hfc_sx.c
++++ b/drivers/isdn/hisax/hfc_sx.c
+@@ -418,8 +418,9 @@ reset_hfcsx(struct IsdnCardState *cs)
+ /* Timer function called when kernel timer expires */
+ /***************************************************/
+ static void
+-hfcsx_Timer(struct IsdnCardState *cs)
++hfcsx_Timer(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       cs->hw.hfcsx.timer.expires = jiffies + 75;
+       /* WD RESET */
+ /*      WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcsx.ctmt | 0x80);
+@@ -860,8 +861,9 @@ hfcsx_interrupt(int intno, void *dev_id)
+ /* timer callback for D-chan busy resolution. Currently no function */
+ /********************************************************************/
+ static void
+-hfcsx_dbusy_timer(struct IsdnCardState *cs)
++hfcsx_dbusy_timer(unsigned long _cs)
+ {
++      //struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+ }
+ /*************************************/
+@@ -1495,7 +1497,7 @@ int setup_hfcsx(struct IsdnCard *card)
+       } else
+               return (0);     /* no valid card type */
+-      cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
++      cs->dbusytimer.function = hfcsx_dbusy_timer;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+       INIT_WORK(&cs->tqueue, hfcsx_bh);
+@@ -1507,7 +1509,7 @@ int setup_hfcsx(struct IsdnCard *card)
+       cs->BC_Write_Reg = NULL;
+       cs->irq_func = &hfcsx_interrupt;
+-      cs->hw.hfcsx.timer.function = (void *) hfcsx_Timer;
++      cs->hw.hfcsx.timer.function = hfcsx_Timer;
+       cs->hw.hfcsx.timer.data = (long) cs;
+       cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */
+       cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */
+diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
+index 678bd52..1c4f12a 100644
+--- a/drivers/isdn/hisax/hfc_usb.c
++++ b/drivers/isdn/hisax/hfc_usb.c
+@@ -343,8 +343,10 @@ handle_led(hfcusb_data *hfc, int event)
+ /* ISDN l1 timer T3 expires */
+ static void
+-l1_timer_expire_t3(hfcusb_data *hfc)
++l1_timer_expire_t3(unsigned long _hfc)
+ {
++      hfcusb_data *hfc = (hfcusb_data *)_hfc;
++
+       hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
+                          NULL);
+@@ -360,8 +362,10 @@ l1_timer_expire_t3(hfcusb_data *hfc)
+ /* ISDN l1 timer T4 expires */
+ static void
+-l1_timer_expire_t4(hfcusb_data *hfc)
++l1_timer_expire_t4(unsigned long _hfc)
+ {
++      hfcusb_data *hfc = (hfcusb_data *)_hfc;
++
+       hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
+                          NULL);
+@@ -1167,12 +1171,12 @@ hfc_usb_init(hfcusb_data *hfc)
+       /* init the t3 timer */
+       init_timer(&hfc->t3_timer);
+       hfc->t3_timer.data = (long) hfc;
+-      hfc->t3_timer.function = (void *) l1_timer_expire_t3;
++      hfc->t3_timer.function = l1_timer_expire_t3;
+       /* init the t4 timer */
+       init_timer(&hfc->t4_timer);
+       hfc->t4_timer.data = (long) hfc;
+-      hfc->t4_timer.function = (void *) l1_timer_expire_t4;
++      hfc->t4_timer.function = l1_timer_expire_t4;
+       /* init the background machinery for control requests */
+       hfc->ctrl_read.bRequestType = 0xc0;
+diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
+index 394da64..85f5f63 100644
+--- a/drivers/isdn/hisax/hfcscard.c
++++ b/drivers/isdn/hisax/hfcscard.c
+@@ -41,8 +41,10 @@ hfcs_interrupt(int intno, void *dev_id)
+ }
+ static void
+-hfcs_Timer(struct IsdnCardState *cs)
++hfcs_Timer(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
++
+       cs->hw.hfcD.timer.expires = jiffies + 75;
+       /* WD RESET */
+ /*    WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt | 0x80);
+@@ -253,7 +255,7 @@ int setup_hfcs(struct IsdnCard *card)
+               outb(0x57, cs->hw.hfcD.addr | 1);
+       }
+       set_cs_func(cs);
+-      cs->hw.hfcD.timer.function = (void *) hfcs_Timer;
++      cs->hw.hfcD.timer.function = hfcs_Timer;
+       cs->hw.hfcD.timer.data = (long) cs;
+       init_timer(&cs->hw.hfcD.timer);
+       cs->cardmsg = &hfcs_card_msg;
+diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
+index 6ead6314..338d040 100644
+--- a/drivers/isdn/hisax/hisax.h
++++ b/drivers/isdn/hisax/hisax.h
+@@ -1288,9 +1288,9 @@ int jiftime(char *s, long mark);
+ int HiSax_command(isdn_ctrl *ic);
+ int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
+ __printf(3, 4)
+-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
++void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...);
+ __printf(3, 0)
+-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
++void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args);
+ void HiSax_reportcard(int cardnr, int sel);
+ int QuickHex(char *txt, u_char *p, int cnt);
+ void LogFrame(struct IsdnCardState *cs, u_char *p, int size);
+diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
+index 96d1df0..77a05ee 100644
+--- a/drivers/isdn/hisax/icc.c
++++ b/drivers/isdn/hisax/icc.c
+@@ -580,8 +580,9 @@ DC_Close_icc(struct IsdnCardState *cs) {
+ }
+ static void
+-dbusy_timer_handler(struct IsdnCardState *cs)
++dbusy_timer_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       struct PStack *stptr;
+       int     rbch, star;
+@@ -676,7 +677,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
+ void setup_icc(struct IsdnCardState *cs)
+ {
+       INIT_WORK(&cs->tqueue, icc_bh);
+-      cs->dbusytimer.function = (void *) dbusy_timer_handler;
++      cs->dbusytimer.function = dbusy_timer_handler;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+ }
+diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
+index 9cc26b4..d7fa044 100644
+--- a/drivers/isdn/hisax/ipacx.c
++++ b/drivers/isdn/hisax/ipacx.c
+@@ -35,7 +35,7 @@
+ static void ph_command(struct IsdnCardState *cs, unsigned int command);
+ static inline void cic_int(struct IsdnCardState *cs);
+ static void dch_l2l1(struct PStack *st, int pr, void *arg);
+-static void dbusy_timer_handler(struct IsdnCardState *cs);
++static void dbusy_timer_handler(unsigned long _cs);
+ static void dch_empty_fifo(struct IsdnCardState *cs, int count);
+ static void dch_fill_fifo(struct IsdnCardState *cs);
+ static inline void dch_int(struct IsdnCardState *cs);
+@@ -198,8 +198,9 @@ dch_l2l1(struct PStack *st, int pr, void *arg)
+ //----------------------------------------------------------
+ //----------------------------------------------------------
+ static void
+-dbusy_timer_handler(struct IsdnCardState *cs)
++dbusy_timer_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       struct PStack *st;
+       int     rbchd, stard;
+@@ -424,7 +425,7 @@ dch_init(struct IsdnCardState *cs)
+       cs->setstack_d      = dch_setstack;
+-      cs->dbusytimer.function = (void *) dbusy_timer_handler;
++      cs->dbusytimer.function = dbusy_timer_handler;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
+index df7e05c..0f7dca1 100644
+--- a/drivers/isdn/hisax/isac.c
++++ b/drivers/isdn/hisax/isac.c
+@@ -584,8 +584,9 @@ DC_Close_isac(struct IsdnCardState *cs)
+ }
+ static void
+-dbusy_timer_handler(struct IsdnCardState *cs)
++dbusy_timer_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       struct PStack *stptr;
+       int     rbch, star;
+@@ -677,7 +678,7 @@ void clear_pending_isac_ints(struct IsdnCardState *cs)
+ void setup_isac(struct IsdnCardState *cs)
+ {
+       INIT_WORK(&cs->tqueue, isac_bh);
+-      cs->dbusytimer.function = (void *) dbusy_timer_handler;
++      cs->dbusytimer.function = dbusy_timer_handler;
+       cs->dbusytimer.data = (long) cs;
+       init_timer(&cs->dbusytimer);
+ }
+diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
+index f4956c7..122d249 100644
+--- a/drivers/isdn/hisax/isar.c
++++ b/drivers/isdn/hisax/isar.c
+@@ -1267,7 +1267,8 @@ isar_int_main(struct IsdnCardState *cs)
+ }
+ static void
+-ftimer_handler(struct BCState *bcs) {
++ftimer_handler(unsigned long _bcs) {
++      struct BCState *bcs = (struct BCState *)_bcs;
+       if (bcs->cs->debug)
+               debugl1(bcs->cs, "ftimer flags %04lx",
+                       bcs->Flag);
+@@ -1902,7 +1903,7 @@ void initisar(struct IsdnCardState *cs)
+       cs->bcs[1].BC_SetStack = setstack_isar;
+       cs->bcs[0].BC_Close = close_isarstate;
+       cs->bcs[1].BC_Close = close_isarstate;
+-      cs->bcs[0].hw.isar.ftimer.function = (void *) ftimer_handler;
++      cs->bcs[0].hw.isar.ftimer.function = ftimer_handler;
+       cs->bcs[0].hw.isar.ftimer.data = (long) &cs->bcs[0];
+       init_timer(&cs->bcs[0].hw.isar.ftimer);
+       cs->bcs[1].hw.isar.ftimer.function = (void *) ftimer_handler;
+diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
+index c754706..8b1ffd5 100644
+--- a/drivers/isdn/hisax/isdnl3.c
++++ b/drivers/isdn/hisax/isdnl3.c
+@@ -160,8 +160,9 @@ newl3state(struct l3_process *pc, int state)
+ }
+ static void
+-L3ExpireTimer(struct L3Timer *t)
++L3ExpireTimer(unsigned long _t)
+ {
++      struct L3Timer *t = (struct L3Timer *)_t;
+       t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc);
+ }
+@@ -169,7 +170,7 @@ void
+ L3InitTimer(struct l3_process *pc, struct L3Timer *t)
+ {
+       t->pc = pc;
+-      t->tl.function = (void *) L3ExpireTimer;
++      t->tl.function = L3ExpireTimer;
+       t->tl.data = (long) t;
+       init_timer(&t->tl);
+ }
+diff --git a/drivers/isdn/hisax/saphir.c b/drivers/isdn/hisax/saphir.c
+index 6b2d0ec..4bf5a9e 100644
+--- a/drivers/isdn/hisax/saphir.c
++++ b/drivers/isdn/hisax/saphir.c
+@@ -159,8 +159,9 @@ Start_ISAC:
+ }
+ static void
+-SaphirWatchDog(struct IsdnCardState *cs)
++SaphirWatchDog(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       u_long flags;
+       spin_lock_irqsave(&cs->lock, flags);
+@@ -268,7 +269,7 @@ int setup_saphir(struct IsdnCard *card)
+              cs->irq, cs->hw.saphir.cfg_reg);
+       setup_isac(cs);
+-      cs->hw.saphir.timer.function = (void *) SaphirWatchDog;
++      cs->hw.saphir.timer.function = SaphirWatchDog;
+       cs->hw.saphir.timer.data = (long) cs;
+       init_timer(&cs->hw.saphir.timer);
+       cs->hw.saphir.timer.expires = jiffies + 4 * HZ;
+diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c
+index bf64754..e2a3709 100644
+--- a/drivers/isdn/hisax/teleint.c
++++ b/drivers/isdn/hisax/teleint.c
+@@ -179,8 +179,9 @@ Start_ISAC:
+ }
+ static void
+-TeleInt_Timer(struct IsdnCardState *cs)
++TeleInt_Timer(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       int stat = 0;
+       u_long flags;
+@@ -278,7 +279,7 @@ int setup_TeleInt(struct IsdnCard *card)
+       cs->bcs[0].hw.hfc.send = NULL;
+       cs->bcs[1].hw.hfc.send = NULL;
+       cs->hw.hfc.fifosize = 7 * 1024 + 512;
+-      cs->hw.hfc.timer.function = (void *) TeleInt_Timer;
++      cs->hw.hfc.timer.function = TeleInt_Timer;
+       cs->hw.hfc.timer.data = (long) cs;
+       init_timer(&cs->hw.hfc.timer);
+       if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) {
+diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
+index a858955..908285b 100644
+--- a/drivers/isdn/hisax/w6692.c
++++ b/drivers/isdn/hisax/w6692.c
+@@ -681,8 +681,9 @@ DC_Close_W6692(struct IsdnCardState *cs)
+ }
+ static void
+-dbusy_timer_handler(struct IsdnCardState *cs)
++dbusy_timer_handler(unsigned long _cs)
+ {
++      struct IsdnCardState *cs = (struct IsdnCardState *)_cs;
+       struct PStack *stptr;
+       int rbch, star;
+       u_long flags;
+@@ -901,7 +902,7 @@ static void initW6692(struct IsdnCardState *cs, int part)
+       if (part & 1) {
+               cs->setstack_d = setstack_W6692;
+               cs->DC_Close = DC_Close_W6692;
+-              cs->dbusytimer.function = (void *) dbusy_timer_handler;
++              cs->dbusytimer.function = dbusy_timer_handler;
+               cs->dbusytimer.data = (long) cs;
+               init_timer(&cs->dbusytimer);
+               resetW6692(cs);
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index 9b856e1..fa03c92 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
+                       } else
+                               return -EINVAL;
+               case IIOCDBGVAR:
++                      if (!capable(CAP_SYS_RAWIO))
++                              return -EPERM;
+                       if (arg) {
+                               if (copy_to_user(argp, &dev, sizeof(ulong)))
+                                       return -EFAULT;
+diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
+index 91d5730..336523e 100644
+--- a/drivers/isdn/i4l/isdn_concap.c
++++ b/drivers/isdn/i4l/isdn_concap.c
+@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
+ }
+ struct concap_device_ops isdn_concap_reliable_dl_dops = {
+-      &isdn_concap_dl_data_req,
+-      &isdn_concap_dl_connect_req,
+-      &isdn_concap_dl_disconn_req
++      .data_req = &isdn_concap_dl_data_req,
++      .connect_req = &isdn_concap_dl_connect_req,
++      .disconn_req = &isdn_concap_dl_disconn_req
+ };
+ /* The following should better go into a dedicated source file such that
+diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
+index 63eaa0a..00a663c 100644
+--- a/drivers/isdn/i4l/isdn_tty.c
++++ b/drivers/isdn/i4l/isdn_tty.c
+@@ -1499,9 +1499,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
+ #ifdef ISDN_DEBUG_MODEM_OPEN
+       printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
+-             port->count);
++             atomic_read(&port->count));
+ #endif
+-      port->count++;
++      atomic_inc(&port->count);
+       port->tty = tty;
+       /*
+        * Start up serial port
+@@ -1545,7 +1545,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
+ #endif
+               return;
+       }
+-      if ((tty->count == 1) && (port->count != 1)) {
++      if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
+               /*
+                * Uh, oh.  tty->count is 1, which means that the tty
+                * structure will be freed.  Info->count should always
+@@ -1554,15 +1554,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
+                * serial port won't be shutdown.
+                */
+               printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
+-                     "info->count is %d\n", port->count);
+-              port->count = 1;
++                     "info->count is %d\n", atomic_read(&port->count));
++              atomic_set(&port->count, 1);
+       }
+-      if (--port->count < 0) {
++      if (atomic_dec_return(&port->count) < 0) {
+               printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
+-                     info->line, port->count);
+-              port->count = 0;
++                     info->line, atomic_read(&port->count));
++              atomic_set(&port->count, 0);
+       }
+-      if (port->count) {
++      if (atomic_read(&port->count)) {
+ #ifdef ISDN_DEBUG_MODEM_OPEN
+               printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
+ #endif
+@@ -1617,7 +1617,7 @@ isdn_tty_hangup(struct tty_struct *tty)
+       if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
+               return;
+       isdn_tty_shutdown(info);
+-      port->count = 0;
++      atomic_set(&port->count, 0);
+       tty_port_set_active(port, 0);
+       port->tty = NULL;
+       wake_up_interruptible(&port->open_wait);
+@@ -1962,7 +1962,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
+       for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
+               modem_info *info = &dev->mdm.info[i];
+-              if (info->port.count == 0)
++              if (atomic_read(&info->port.count) == 0)
+                       continue;
+               if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) &&  /* SI1 is matching */
+                   (info->emu.mdmreg[REG_SI2] == si2)) {         /* SI2 is matching */
+diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
+index 0c5d8de..ba60076 100644
+--- a/drivers/isdn/i4l/isdn_x25iface.c
++++ b/drivers/isdn/i4l/isdn_x25iface.c
+@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
+ static struct concap_proto_ops ix25_pops = {
+-      &isdn_x25iface_proto_new,
+-      &isdn_x25iface_proto_del,
+-      &isdn_x25iface_proto_restart,
+-      &isdn_x25iface_proto_close,
+-      &isdn_x25iface_xmit,
+-      &isdn_x25iface_receive,
+-      &isdn_x25iface_connect_ind,
+-      &isdn_x25iface_disconn_ind
++      .proto_new = &isdn_x25iface_proto_new,
++      .proto_del = &isdn_x25iface_proto_del,
++      .restart = &isdn_x25iface_proto_restart,
++      .close = &isdn_x25iface_proto_close,
++      .encap_and_xmit = &isdn_x25iface_xmit,
++      .data_ind = &isdn_x25iface_receive,
++      .connect_ind = &isdn_x25iface_connect_ind,
++      .disconn_ind = &isdn_x25iface_disconn_ind
+ };
+ /* error message helper function */
+diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
+index fc1733a..27bf261 100644
+--- a/drivers/isdn/mISDN/dsp.h
++++ b/drivers/isdn/mISDN/dsp.h
+@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
+ extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
+ extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
+ extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
+-extern void dsp_cmx_send(void *arg);
++extern void dsp_cmx_send(unsigned long arg);
+ extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
+ extern int dsp_cmx_del_conf_member(struct dsp *dsp);
+ extern int dsp_cmx_del_conf(struct dsp_conf *conf);
+@@ -259,7 +259,7 @@ extern u8 *dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len,
+ extern int dsp_tone(struct dsp *dsp, int tone);
+ extern void dsp_tone_copy(struct dsp *dsp, u8 *data, int len);
+-extern void dsp_tone_timeout(void *arg);
++extern void dsp_tone_timeout(unsigned long arg);
+ extern void dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len);
+ extern void dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len);
+diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
+index 8e3aa00..723faf8 100644
+--- a/drivers/isdn/mISDN/dsp_cmx.c
++++ b/drivers/isdn/mISDN/dsp_cmx.c
+@@ -1625,8 +1625,8 @@ unsigned long    dsp_spl_jiffies; /* calculate the next time to fire */
+ static u16    dsp_count; /* last sample count */
+ static int    dsp_count_valid; /* if we have last sample count */
+-void
+-dsp_cmx_send(void *arg)
++void __intentional_overflow(-1)
++dsp_cmx_send(unsigned long arg)
+ {
+       struct dsp_conf *conf;
+       struct dsp_conf_member *member;
+diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
+index 0222b1a..67fb76a 100644
+--- a/drivers/isdn/mISDN/dsp_core.c
++++ b/drivers/isdn/mISDN/dsp_core.c
+@@ -1092,7 +1092,7 @@ dspcreate(struct channel_req *crq)
+       ndsp->pcm_bank_tx = -1;
+       ndsp->hfc_conf = -1; /* current conference number */
+       /* set tone timer */
+-      ndsp->tone.tl.function = (void *)dsp_tone_timeout;
++      ndsp->tone.tl.function = dsp_tone_timeout;
+       ndsp->tone.tl.data = (long) ndsp;
+       init_timer(&ndsp->tone.tl);
+@@ -1204,7 +1204,7 @@ static int __init dsp_init(void)
+       }
+       /* set sample timer */
+-      dsp_spl_tl.function = (void *)dsp_cmx_send;
++      dsp_spl_tl.function = dsp_cmx_send;
+       dsp_spl_tl.data = 0;
+       init_timer(&dsp_spl_tl);
+       dsp_spl_tl.expires = jiffies + dsp_tics;
+diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c
+index 057e0d6..ed229b5 100644
+--- a/drivers/isdn/mISDN/dsp_tones.c
++++ b/drivers/isdn/mISDN/dsp_tones.c
+@@ -457,9 +457,9 @@ dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
+  * timer expires *
+  *****************/
+ void
+-dsp_tone_timeout(void *arg)
++dsp_tone_timeout(unsigned long arg)
+ {
+-      struct dsp *dsp = arg;
++      struct dsp *dsp = (struct dsp *)arg;
+       struct dsp_tone *tone = &dsp->tone;
+       struct pattern *pat = (struct pattern *)tone->pattern;
+       int index = tone->index;
+diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
+index 26477d4..4fa3876 100644
+--- a/drivers/isdn/mISDN/fsm.c
++++ b/drivers/isdn/mISDN/fsm.c
+@@ -97,8 +97,9 @@ mISDN_FsmChangeState(struct FsmInst *fi, int newstate)
+ EXPORT_SYMBOL(mISDN_FsmChangeState);
+ static void
+-FsmExpireTimer(struct FsmTimer *ft)
++FsmExpireTimer(unsigned long _ft)
+ {
++      struct FsmTimer *ft = (struct FsmTimer *)_ft;
+ #if FSM_TIMER_DEBUG
+       if (ft->fi->debug)
+               ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
+@@ -110,7 +111,7 @@ void
+ mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
+ {
+       ft->fi = fi;
+-      ft->tl.function = (void *) FsmExpireTimer;
++      ft->tl.function = FsmExpireTimer;
+       ft->tl.data = (long) ft;
+ #if FSM_TIMER_DEBUG
+       if (ft->fi->debug)
+diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
+index 67c2187..fc71e33 100644
+--- a/drivers/isdn/mISDN/l1oip_core.c
++++ b/drivers/isdn/mISDN/l1oip_core.c
+@@ -840,7 +840,7 @@ l1oip_send_bh(struct work_struct *work)
+  * timer stuff
+  */
+ static void
+-l1oip_keepalive(void *data)
++l1oip_keepalive(unsigned long data)
+ {
+       struct l1oip *hc = (struct l1oip *)data;
+@@ -848,7 +848,7 @@ l1oip_keepalive(void *data)
+ }
+ static void
+-l1oip_timeout(void *data)
++l1oip_timeout(unsigned long data)
+ {
+       struct l1oip                    *hc = (struct l1oip *)data;
+       struct dchannel         *dch = hc->chan[hc->d_idx].dch;
+@@ -1435,13 +1435,13 @@ init_card(struct l1oip *hc, int pri, int bundle)
+       if (ret)
+               return ret;
+-      hc->keep_tl.function = (void *)l1oip_keepalive;
++      hc->keep_tl.function = l1oip_keepalive;
+       hc->keep_tl.data = (ulong)hc;
+       init_timer(&hc->keep_tl);
+       hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */
+       add_timer(&hc->keep_tl);
+-      hc->timeout_tl.function = (void *)l1oip_timeout;
++      hc->timeout_tl.function = l1oip_timeout;
+       hc->timeout_tl.data = (ulong)hc;
+       init_timer(&hc->timeout_tl);
+       hc->timeout_on = 0; /* state that we have timer off */
+diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
+index 0f9ed1e..492789f 100644
+--- a/drivers/leds/leds-clevo-mail.c
++++ b/drivers/leds/leds-clevo-mail.c
+@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
+  * detected as working, but in reality it is not) as low as
+  * possible.
+  */
+-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
++static const struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
+       {
+               .callback = clevo_mail_led_dmi_callback,
+               .ident = "Clevo D410J",
+diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
+index 732eb86..a9db867 100644
+--- a/drivers/leds/leds-ss4200.c
++++ b/drivers/leds/leds-ss4200.c
+@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
+  * detected as working, but in reality it is not) as low as
+  * possible.
+  */
+-static struct dmi_system_id nas_led_whitelist[] __initdata = {
++static const struct dmi_system_id nas_led_whitelist[] __initconst = {
+       {
+               .callback = ss4200_led_dmi_callback,
+               .ident = "Intel SS4200-E",
+diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
+index 9e385b3..7077882 100644
+--- a/drivers/lguest/core.c
++++ b/drivers/lguest/core.c
+@@ -87,7 +87,7 @@ static __init int map_switcher(void)
+        * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
+        * It goes in the first page, which we map in momentarily.
+        */
+-      memcpy(kmap(lg_switcher_pages[0]), start_switcher_text,
++      memcpy(kmap(lg_switcher_pages[0]), (void *)ktla_ktva((unsigned long)start_switcher_text),
+              end_switcher_text - start_switcher_text);
+       kunmap(lg_switcher_pages[0]);
+@@ -106,9 +106,16 @@ static __init int map_switcher(void)
+        * We want the switcher text to be read-only and executable, and
+        * the stacks to be read-write and non-executable.
+        */
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD|VM_KERNEXEC,
++                                        switcher_addr,
++                                        switcher_addr + PAGE_SIZE);
++#else
+       switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD,
+                                         switcher_addr,
+                                         switcher_addr + PAGE_SIZE);
++#endif
+       if (!switcher_text_vma) {
+               err = -ENOMEM;
+diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
+index e3abebc9..6a35328 100644
+--- a/drivers/lguest/page_tables.c
++++ b/drivers/lguest/page_tables.c
+@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
+ /*:*/
+ #ifdef CONFIG_X86_PAE
+-static void release_pmd(pmd_t *spmd)
++static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
+ {
+       /* If the entry's not present, there's nothing to release. */
+       if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
+index 6e9042e..befd030 100644
+--- a/drivers/lguest/x86/core.c
++++ b/drivers/lguest/x86/core.c
+@@ -60,7 +60,7 @@ static struct {
+ /* Offset from where switcher.S was compiled to where we've copied it */
+ static unsigned long switcher_offset(void)
+ {
+-      return switcher_addr - (unsigned long)start_switcher_text;
++      return switcher_addr - ktla_ktva((unsigned long)start_switcher_text);
+ }
+ /* This cpu's struct lguest_pages (after the Switcher text page) */
+@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
+        * These copies are pretty cheap, so we do them unconditionally: */
+       /* Save the current Host top-level page directory.
+        */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++      pages->state.host_cr3 = read_cr3();
++#else
+       pages->state.host_cr3 = __pa(current->mm->pgd);
++#endif
++
+       /*
+        * Set up the Guest's page tables to see this CPU's pages (and no
+        * other CPU's pages).
+@@ -498,7 +504,7 @@ void __init lguest_arch_host_init(void)
+        * compiled-in switcher code and the high-mapped copy we just made.
+        */
+       for (i = 0; i < IDT_ENTRIES; i++)
+-              default_idt_entries[i] += switcher_offset();
++              default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
+       /*
+        * Set up the Switcher's per-cpu areas.
+@@ -581,7 +587,7 @@ void __init lguest_arch_host_init(void)
+        * it will be undisturbed when we switch.  To change %cs and jump we
+        * need this structure to feed to Intel's "lcall" instruction.
+        */
+-      lguest_entry.offset = (long)switch_to_guest + switcher_offset();
++      lguest_entry.offset = ktla_ktva((unsigned long)switch_to_guest) + switcher_offset();
+       lguest_entry.segment = LGUEST_CS;
+       /*
+diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
+index 40634b0..4f5855e 100644
+--- a/drivers/lguest/x86/switcher_32.S
++++ b/drivers/lguest/x86/switcher_32.S
+@@ -87,6 +87,7 @@
+ #include <asm/page.h>
+ #include <asm/segment.h>
+ #include <asm/lguest.h>
++#include <asm/processor-flags.h>
+ // We mark the start of the code to copy
+ // It's placed in .text tho it's never run here
+@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
+       // Changes type when we load it: damn Intel!
+       // For after we switch over our page tables
+       // That entry will be read-only: we'd crash.
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov     %cr0, %edx
++      xor     $X86_CR0_WP, %edx
++      mov     %edx, %cr0
++#endif
++
+       movl    $(GDT_ENTRY_TSS*8), %edx
+       ltr     %dx
+@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
+       // Let's clear it again for our return.
+       // The GDT descriptor of the Host
+       // Points to the table after two "size" bytes
+-      movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
++      movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
+       // Clear "used" from type field (byte 5, bit 2)
+-      andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
++      andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
++
++#ifdef CONFIG_PAX_KERNEXEC
++      mov     %cr0, %eax
++      xor     $X86_CR0_WP, %eax
++      mov     %eax, %cr0
++#endif
+       // Once our page table's switched, the Guest is live!
+       // The Host fades as we run this final step.
+@@ -295,13 +309,12 @@ deliver_to_host:
+       // I consulted gcc, and it gave
+       // These instructions, which I gladly credit:
+       leal    (%edx,%ebx,8), %eax
+-      movzwl  (%eax),%edx
+-      movl    4(%eax), %eax
+-      xorw    %ax, %ax
+-      orl     %eax, %edx
++      movl    4(%eax), %edx
++      movw    (%eax), %dx
+       // Now the address of the handler's in %edx
+       // We call it now: its "iret" drops us home.
+-      jmp     *%edx
++      ljmp    $__KERNEL_CS, $1f
++1:    jmp     *%edx
+ // Every interrupt can come to us here
+ // But we must truly tell each apart.
+diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
+index 37fcaad..e2be8ad 100644
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -231,7 +231,7 @@ static void rrpc_put_blks(struct rrpc *rrpc)
+ static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
+ {
+-      int next = atomic_inc_return(&rrpc->next_lun);
++      int next = atomic_inc_return_unchecked(&rrpc->next_lun);
+       return &rrpc->luns[next % rrpc->nr_luns];
+ }
+@@ -1389,7 +1389,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
+       rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
+       /* simple round-robin strategy */
+-      atomic_set(&rrpc->next_lun, -1);
++      atomic_set_unchecked(&rrpc->next_lun, -1);
+       ret = rrpc_area_init(rrpc, &soffset);
+       if (ret < 0) {
+diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
+index 5e87d52..2666040 100644
+--- a/drivers/lightnvm/rrpc.h
++++ b/drivers/lightnvm/rrpc.h
+@@ -104,7 +104,7 @@ struct rrpc {
+       /* Write strategy variables. Move these into each for structure for each
+        * strategy
+        */
+-      atomic_t next_lun; /* Whenever a page is written, this is updated
++      atomic_unchecked_t next_lun; /* Whenever a page is written, this is updated
+                           * to point to the next write lun
+                           */
+diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
+index 4d20088..de60cb2 100644
+--- a/drivers/md/bcache/Kconfig
++++ b/drivers/md/bcache/Kconfig
+@@ -20,6 +20,7 @@ config BCACHE_CLOSURES_DEBUG
+       bool "Debug closures"
+       depends on BCACHE
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       ---help---
+       Keeps all active closures in a linked list and provides a debugfs
+       interface to list them, which makes it possible to see asynchronous
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index ca4abe1..0b029ef 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -631,7 +631,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+       for (i = 0; i < KEY_PTRS(&b->key); i++) {
+               SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+-              atomic_long_add(sectors,
++              atomic_long_add_unchecked(sectors,
+                               &PTR_CACHE(c, &b->key, i)->sectors_written);
+       }
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 6b420a5..d5acb8f 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -433,12 +433,12 @@ struct cache {
+       /* The rest of this all shows up in sysfs */
+ #define IO_ERROR_SHIFT                20
+-      atomic_t                io_errors;
+-      atomic_t                io_count;
++      atomic_unchecked_t      io_errors;
++      atomic_unchecked_t      io_count;
+-      atomic_long_t           meta_sectors_written;
+-      atomic_long_t           btree_sectors_written;
+-      atomic_long_t           sectors_written;
++      atomic_long_unchecked_t meta_sectors_written;
++      atomic_long_unchecked_t btree_sectors_written;
++      atomic_long_unchecked_t sectors_written;
+ };
+ struct gc_stat {
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 76f7534..f5ad9e6 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -336,15 +336,17 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
+       w->journal      = NULL;
+ }
+-static void btree_node_write_unlock(struct closure *cl)
++static void btree_node_write_unlock(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct btree *b = container_of(cl, struct btree, io);
+       up(&b->io_mutex);
+ }
+-static void __btree_node_write_done(struct closure *cl)
++static void __btree_node_write_done(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct btree *b = container_of(cl, struct btree, io);
+       struct btree_write *w = btree_prev_write(b);
+@@ -358,8 +360,9 @@ static void __btree_node_write_done(struct closure *cl)
+       closure_return_with_destructor(cl, btree_node_write_unlock);
+ }
+-static void btree_node_write_done(struct closure *cl)
++static void btree_node_write_done(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct btree *b = container_of(cl, struct btree, io);
+       struct bio_vec *bv;
+       int n;
+@@ -367,7 +370,7 @@ static void btree_node_write_done(struct closure *cl)
+       bio_for_each_segment_all(bv, b->bio, n)
+               __free_page(bv->bv_page);
+-      __btree_node_write_done(cl);
++      __btree_node_write_done(&cl->work);
+ }
+ static void btree_node_write_endio(struct bio *bio)
+@@ -467,7 +470,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+       do_btree_node_write(b);
+-      atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
++      atomic_long_add_unchecked(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
+                       &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+       b->written += set_blocks(i, block_bytes(b->c));
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 864e673..9c022d1 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -29,12 +29,12 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
+                       closure_queue(cl);
+               } else {
+                       struct closure *parent = cl->parent;
+-                      closure_fn *destructor = cl->fn;
++                      work_func_t destructor = cl->fn;
+                       closure_debug_destroy(cl);
+                       if (destructor)
+-                              destructor(cl);
++                              destructor(&cl->work);
+                       if (parent)
+                               closure_put(parent);
+diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
+index 9b2fe2d..be17fd2 100644
+--- a/drivers/md/bcache/closure.h
++++ b/drivers/md/bcache/closure.h
+@@ -152,7 +152,7 @@ struct closure {
+                       struct workqueue_struct *wq;
+                       struct task_struct      *task;
+                       struct llist_node       list;
+-                      closure_fn              *fn;
++                      work_func_t             fn;
+               };
+               struct work_struct      work;
+       };
+@@ -236,10 +236,10 @@ static inline void closure_set_stopped(struct closure *cl)
+       atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+ }
+-static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
++static inline void set_closure_fn(struct closure *cl, work_func_t fn,
+                                 struct workqueue_struct *wq)
+ {
+-      BUG_ON(object_is_on_stack(cl));
++      BUG_ON(object_starts_on_stack(cl));
+       closure_set_ip(cl);
+       cl->fn = fn;
+       cl->wq = wq;
+@@ -254,7 +254,7 @@ static inline void closure_queue(struct closure *cl)
+               INIT_WORK(&cl->work, cl->work.func);
+               BUG_ON(!queue_work(wq, &cl->work));
+       } else
+-              cl->fn(cl);
++              cl->fn(&cl->work);
+ }
+ /**
+@@ -373,7 +373,7 @@ do {                                                                       \
+  * asynchronously out of a new closure - @parent will then wait for @cl to
+  * finish.
+  */
+-static inline void closure_call(struct closure *cl, closure_fn fn,
++static inline void closure_call(struct closure *cl, work_func_t fn,
+                               struct workqueue_struct *wq,
+                               struct closure *parent)
+ {
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index e97b0ac..5aff0fa 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -60,7 +60,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
+        */
+       if (ca->set->error_decay) {
+-              unsigned count = atomic_inc_return(&ca->io_count);
++              unsigned count = atomic_inc_return_unchecked(&ca->io_count);
+               while (count > ca->set->error_decay) {
+                       unsigned errors;
+@@ -72,16 +72,16 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
+                        * succesfully do so, we rescale the errors once:
+                        */
+-                      count = atomic_cmpxchg(&ca->io_count, old, new);
++                      count = atomic_cmpxchg_unchecked(&ca->io_count, old, new);
+                       if (count == old) {
+                               count = new;
+-                              errors = atomic_read(&ca->io_errors);
++                              errors = atomic_read_unchecked(&ca->io_errors);
+                               do {
+                                       old = errors;
+                                       new = ((uint64_t) errors * 127) / 128;
+-                                      errors = atomic_cmpxchg(&ca->io_errors,
++                                      errors = atomic_cmpxchg_unchecked(&ca->io_errors,
+                                                               old, new);
+                               } while (old != errors);
+                       }
+@@ -90,7 +90,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
+       if (error) {
+               char buf[BDEVNAME_SIZE];
+-              unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
++              unsigned errors = atomic_add_return_unchecked(1 << IO_ERROR_SHIFT,
+                                                   &ca->io_errors);
+               errors >>= IO_ERROR_SHIFT;
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 6925023..bff91f0 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -555,10 +555,11 @@ static void journal_write_endio(struct bio *bio)
+       closure_put(&w->c->journal.io);
+ }
+-static void journal_write(struct closure *);
++static void journal_write(struct work_struct *);
+-static void journal_write_done(struct closure *cl)
++static void journal_write_done(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct journal *j = container_of(cl, struct journal, io);
+       struct journal_write *w = (j->cur == j->w)
+               ? &j->w[1]
+@@ -568,17 +569,19 @@ static void journal_write_done(struct closure *cl)
+       continue_at_nobarrier(cl, journal_write, system_wq);
+ }
+-static void journal_write_unlock(struct closure *cl)
++static void journal_write_unlock(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+       c->journal.io_in_flight = 0;
+       spin_unlock(&c->journal.lock);
+ }
+-static void journal_write_unlocked(struct closure *cl)
++static void journal_write_unlocked(struct work_struct *work)
+       __releases(c->journal.lock)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+       struct cache *ca;
+       struct journal_write *w = c->journal.cur;
+@@ -621,7 +624,7 @@ static void journal_write_unlocked(struct closure *cl)
+               ca = PTR_CACHE(c, k, i);
+               bio = &ca->journal.bio;
+-              atomic_long_add(sectors, &ca->meta_sectors_written);
++              atomic_long_add_unchecked(sectors, &ca->meta_sectors_written);
+               bio_reset(bio);
+               bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
+@@ -654,12 +657,13 @@ static void journal_write_unlocked(struct closure *cl)
+       continue_at(cl, journal_write_done, NULL);
+ }
+-static void journal_write(struct closure *cl)
++static void journal_write(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+       spin_lock(&c->journal.lock);
+-      journal_write_unlocked(cl);
++      journal_write_unlocked(&cl->work);
+ }
+ static void journal_try_write(struct cache_set *c)
+diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
+index 1881319..bec4997 100644
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -34,14 +34,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
+ /* Moving GC - IO loop */
+-static void moving_io_destructor(struct closure *cl)
++static void moving_io_destructor(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       kfree(io);
+ }
+-static void write_moving_finish(struct closure *cl)
++static void write_moving_finish(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       struct bio *bio = &io->bio.bio;
+       struct bio_vec *bv;
+@@ -92,8 +94,9 @@ static void moving_init(struct moving_io *io)
+       bch_bio_map(bio, NULL);
+ }
+-static void write_moving(struct closure *cl)
++static void write_moving(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       struct data_insert_op *op = &io->op;
+@@ -116,8 +119,9 @@ static void write_moving(struct closure *cl)
+       continue_at(cl, write_moving_finish, op->wq);
+ }
+-static void read_moving_submit(struct closure *cl)
++static void read_moving_submit(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       struct bio *bio = &io->bio.bio;
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 4b177fe..be3cbd4 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -24,7 +24,7 @@
+ struct kmem_cache *bch_search_cache;
+-static void bch_data_insert_start(struct closure *);
++static void bch_data_insert_start(struct work_struct *);
+ static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+ {
+@@ -53,8 +53,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
+ /* Insert data into cache */
+-static void bch_data_insert_keys(struct closure *cl)
++static void bch_data_insert_keys(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       atomic_t *journal_ref = NULL;
+       struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+@@ -143,8 +144,9 @@ out:
+       continue_at(cl, bch_data_insert_keys, op->wq);
+ }
+-static void bch_data_insert_error(struct closure *cl)
++static void bch_data_insert_error(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       /*
+@@ -170,7 +172,7 @@ static void bch_data_insert_error(struct closure *cl)
+       op->insert_keys.top = dst;
+-      bch_data_insert_keys(cl);
++      bch_data_insert_keys(&cl->work);
+ }
+ static void bch_data_insert_endio(struct bio *bio)
+@@ -191,8 +193,9 @@ static void bch_data_insert_endio(struct bio *bio)
+       bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+ }
+-static void bch_data_insert_start(struct closure *cl)
++static void bch_data_insert_start(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       struct bio *bio = op->bio, *n;
+@@ -313,8 +316,9 @@ err:
+  * If s->bypass is true, instead of inserting the data it invalidates the
+  * region of the cache represented by s->cache_bio and op->inode.
+  */
+-void bch_data_insert(struct closure *cl)
++void bch_data_insert(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       trace_bcache_write(op->c, op->inode, op->bio,
+@@ -322,7 +326,7 @@ void bch_data_insert(struct closure *cl)
+       bch_keylist_init(&op->insert_keys);
+       bio_get(op->bio);
+-      bch_data_insert_start(cl);
++      bch_data_insert_start(&cl->work);
+ }
+ /* Congested? */
+@@ -570,8 +574,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+       return n == bio ? MAP_DONE : MAP_CONTINUE;
+ }
+-static void cache_lookup(struct closure *cl)
++static void cache_lookup(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, iop.cl);
+       struct bio *bio = &s->bio.bio;
+       int ret;
+@@ -631,8 +636,9 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
+       bio_cnt_set(bio, 3);
+ }
+-static void search_free(struct closure *cl)
++static void search_free(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       bio_complete(s);
+@@ -676,19 +682,21 @@ static inline struct search *search_alloc(struct bio *bio,
+ /* Cached devices */
+-static void cached_dev_bio_complete(struct closure *cl)
++static void cached_dev_bio_complete(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+-      search_free(cl);
++      search_free(&cl->work);
+       cached_dev_put(dc);
+ }
+ /* Process reads */
+-static void cached_dev_cache_miss_done(struct closure *cl)
++static void cached_dev_cache_miss_done(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       if (s->iop.replace_collision)
+@@ -702,11 +710,12 @@ static void cached_dev_cache_miss_done(struct closure *cl)
+                       __free_page(bv->bv_page);
+       }
+-      cached_dev_bio_complete(cl);
++      cached_dev_bio_complete(&cl->work);
+ }
+-static void cached_dev_read_error(struct closure *cl)
++static void cached_dev_read_error(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       struct bio *bio = &s->bio.bio;
+@@ -725,8 +734,9 @@ static void cached_dev_read_error(struct closure *cl)
+       continue_at(cl, cached_dev_cache_miss_done, NULL);
+ }
+-static void cached_dev_read_done(struct closure *cl)
++static void cached_dev_read_done(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+@@ -765,8 +775,9 @@ static void cached_dev_read_done(struct closure *cl)
+       continue_at(cl, cached_dev_cache_miss_done, NULL);
+ }
+-static void cached_dev_read_done_bh(struct closure *cl)
++static void cached_dev_read_done_bh(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+@@ -864,13 +875,14 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
+ /* Process writes */
+-static void cached_dev_write_complete(struct closure *cl)
++static void cached_dev_write_complete(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+       up_read_non_owner(&dc->writeback_lock);
+-      cached_dev_bio_complete(cl);
++      cached_dev_bio_complete(&cl->work);
+ }
+ static void cached_dev_write(struct cached_dev *dc, struct search *s)
+@@ -942,8 +954,9 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
+       continue_at(cl, cached_dev_write_complete, NULL);
+ }
+-static void cached_dev_nodata(struct closure *cl)
++static void cached_dev_nodata(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       struct bio *bio = &s->bio.bio;
+@@ -1063,8 +1076,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
+       return MAP_CONTINUE;
+ }
+-static void flash_dev_nodata(struct closure *cl)
++static void flash_dev_nodata(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct search *s = container_of(cl, struct search, cl);
+       if (s->iop.flush_journal)
+diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
+index 1ff3687..b8f4a05 100644
+--- a/drivers/md/bcache/request.h
++++ b/drivers/md/bcache/request.h
+@@ -33,7 +33,7 @@ struct data_insert_op {
+ };
+ unsigned bch_get_congested(struct cache_set *);
+-void bch_data_insert(struct closure *cl);
++void bch_data_insert(struct work_struct *work);
+ void bch_cached_dev_request_init(struct cached_dev *dc);
+ void bch_flash_dev_request_init(struct bcache_device *d);
+diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
+index 0ca072c..5e6e5c3 100644
+--- a/drivers/md/bcache/stats.c
++++ b/drivers/md/bcache/stats.c
+@@ -120,7 +120,7 @@ void bch_cache_accounting_destroy(struct cache_accounting *acc)
+       kobject_put(&acc->hour.kobj);
+       kobject_put(&acc->day.kobj);
+-      atomic_set(&acc->closing, 1);
++      atomic_set_unchecked(&acc->closing, 1);
+       if (del_timer_sync(&acc->timer))
+               closure_return(&acc->cl);
+ }
+@@ -151,7 +151,7 @@ static void scale_accounting(unsigned long data)
+       struct cache_accounting *acc = (struct cache_accounting *) data;
+ #define move_stat(name) do {                                          \
+-      unsigned t = atomic_xchg(&acc->collector.name, 0);              \
++      unsigned t = atomic_xchg_unchecked(&acc->collector.name, 0);    \
+       t <<= 16;                                                       \
+       acc->five_minute.name += t;                                     \
+       acc->hour.name += t;                                            \
+@@ -174,7 +174,7 @@ static void scale_accounting(unsigned long data)
+       acc->timer.expires += accounting_delay;
+-      if (!atomic_read(&acc->closing))
++      if (!atomic_read_unchecked(&acc->closing))
+               add_timer(&acc->timer);
+       else
+               closure_return(&acc->cl);
+@@ -185,14 +185,14 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
+ {
+       if (!bypass)
+               if (hit)
+-                      atomic_inc(&stats->cache_hits);
++                      atomic_inc_unchecked(&stats->cache_hits);
+               else
+-                      atomic_inc(&stats->cache_misses);
++                      atomic_inc_unchecked(&stats->cache_misses);
+       else
+               if (hit)
+-                      atomic_inc(&stats->cache_bypass_hits);
++                      atomic_inc_unchecked(&stats->cache_bypass_hits);
+               else
+-                      atomic_inc(&stats->cache_bypass_misses);
++                      atomic_inc_unchecked(&stats->cache_bypass_misses);
+ }
+ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+@@ -206,22 +206,22 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
+ {
+       struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+-      atomic_inc(&dc->accounting.collector.cache_readaheads);
+-      atomic_inc(&c->accounting.collector.cache_readaheads);
++      atomic_inc_unchecked(&dc->accounting.collector.cache_readaheads);
++      atomic_inc_unchecked(&c->accounting.collector.cache_readaheads);
+ }
+ void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
+ {
+       struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+-      atomic_inc(&dc->accounting.collector.cache_miss_collisions);
+-      atomic_inc(&c->accounting.collector.cache_miss_collisions);
++      atomic_inc_unchecked(&dc->accounting.collector.cache_miss_collisions);
++      atomic_inc_unchecked(&c->accounting.collector.cache_miss_collisions);
+ }
+ void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+                              int sectors)
+ {
+-      atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
+-      atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
++      atomic_add_unchecked(sectors, &dc->accounting.collector.sectors_bypassed);
++      atomic_add_unchecked(sectors, &c->accounting.collector.sectors_bypassed);
+ }
+ void bch_cache_accounting_init(struct cache_accounting *acc,
+diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
+index adbff14..018c2d2 100644
+--- a/drivers/md/bcache/stats.h
++++ b/drivers/md/bcache/stats.h
+@@ -2,13 +2,13 @@
+ #define _BCACHE_STATS_H_
+ struct cache_stat_collector {
+-      atomic_t cache_hits;
+-      atomic_t cache_misses;
+-      atomic_t cache_bypass_hits;
+-      atomic_t cache_bypass_misses;
+-      atomic_t cache_readaheads;
+-      atomic_t cache_miss_collisions;
+-      atomic_t sectors_bypassed;
++      atomic_unchecked_t cache_hits;
++      atomic_unchecked_t cache_misses;
++      atomic_unchecked_t cache_bypass_hits;
++      atomic_unchecked_t cache_bypass_misses;
++      atomic_unchecked_t cache_readaheads;
++      atomic_unchecked_t cache_miss_collisions;
++      atomic_unchecked_t sectors_bypassed;
+ };
+ struct cache_stats {
+@@ -28,7 +28,7 @@ struct cache_stats {
+ struct cache_accounting {
+       struct closure          cl;
+       struct timer_list       timer;
+-      atomic_t                closing;
++      atomic_unchecked_t      closing;
+       struct cache_stat_collector collector;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 849ad44..a9e695e 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -240,8 +240,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
+       submit_bio(bio);
+ }
+-static void bch_write_bdev_super_unlock(struct closure *cl)
++static void bch_write_bdev_super_unlock(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+       up(&dc->sb_write_mutex);
+@@ -274,8 +275,9 @@ static void write_super_endio(struct bio *bio)
+       closure_put(&ca->set->sb_write);
+ }
+-static void bcache_write_super_unlock(struct closure *cl)
++static void bcache_write_super_unlock(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+       up(&c->sb_write_mutex);
+@@ -325,8 +327,9 @@ static void uuid_endio(struct bio *bio)
+       closure_put(cl);
+ }
+-static void uuid_io_unlock(struct closure *cl)
++static void uuid_io_unlock(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+       up(&c->uuid_write_mutex);
+@@ -531,7 +534,7 @@ void bch_prio_write(struct cache *ca)
+       ca->disk_buckets->seq++;
+-      atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
++      atomic_long_add_unchecked(ca->sb.bucket_size * prio_buckets(ca),
+                       &ca->meta_sectors_written);
+       //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+@@ -1051,8 +1054,9 @@ void bch_cached_dev_release(struct kobject *kobj)
+       module_put(THIS_MODULE);
+ }
+-static void cached_dev_free(struct closure *cl)
++static void cached_dev_free(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+       cancel_delayed_work_sync(&dc->writeback_rate_update);
+@@ -1076,8 +1080,9 @@ static void cached_dev_free(struct closure *cl)
+       kobject_put(&dc->disk.kobj);
+ }
+-static void cached_dev_flush(struct closure *cl)
++static void cached_dev_flush(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+       struct bcache_device *d = &dc->disk;
+@@ -1193,8 +1198,9 @@ void bch_flash_dev_release(struct kobject *kobj)
+       kfree(d);
+ }
+-static void flash_dev_free(struct closure *cl)
++static void flash_dev_free(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+       mutex_lock(&bch_register_lock);
+       bcache_device_free(d);
+@@ -1202,8 +1208,9 @@ static void flash_dev_free(struct closure *cl)
+       kobject_put(&d->kobj);
+ }
+-static void flash_dev_flush(struct closure *cl)
++static void flash_dev_flush(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+       mutex_lock(&bch_register_lock);
+@@ -1322,8 +1329,9 @@ void bch_cache_set_release(struct kobject *kobj)
+       module_put(THIS_MODULE);
+ }
+-static void cache_set_free(struct closure *cl)
++static void cache_set_free(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, cl);
+       struct cache *ca;
+       unsigned i;
+@@ -1368,8 +1376,9 @@ static void cache_set_free(struct closure *cl)
+       kobject_put(&c->kobj);
+ }
+-static void cache_set_flush(struct closure *cl)
++static void cache_set_flush(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, caching);
+       struct cache *ca;
+       struct btree *b;
+@@ -1410,8 +1419,9 @@ static void cache_set_flush(struct closure *cl)
+       closure_return(cl);
+ }
+-static void __cache_set_unregister(struct closure *cl)
++static void __cache_set_unregister(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct cache_set *c = container_of(cl, struct cache_set, caching);
+       struct cached_dev *dc;
+       size_t i;
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index b3ff57d..b2e30fb 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -739,15 +739,15 @@ SHOW(__bch_cache)
+       sysfs_hprint(block_size,        block_bytes(ca));
+       sysfs_print(nbuckets,           ca->sb.nbuckets);
+       sysfs_print(discard,            ca->discard);
+-      sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
++      sysfs_hprint(written, atomic_long_read_unchecked(&ca->sectors_written) << 9);
+       sysfs_hprint(btree_written,
+-                   atomic_long_read(&ca->btree_sectors_written) << 9);
++                   atomic_long_read_unchecked(&ca->btree_sectors_written) << 9);
+       sysfs_hprint(metadata_written,
+-                   (atomic_long_read(&ca->meta_sectors_written) +
+-                    atomic_long_read(&ca->btree_sectors_written)) << 9);
++                   (atomic_long_read_unchecked(&ca->meta_sectors_written) +
++                    atomic_long_read_unchecked(&ca->btree_sectors_written)) << 9);
+       sysfs_print(io_errors,
+-                  atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
++                  atomic_read_unchecked(&ca->io_errors) >> IO_ERROR_SHIFT);
+       if (attr == &sysfs_cache_replacement_policy)
+               return bch_snprint_string_list(buf, PAGE_SIZE,
+@@ -870,11 +870,11 @@ STORE(__bch_cache)
+       }
+       if (attr == &sysfs_clear_stats) {
+-              atomic_long_set(&ca->sectors_written, 0);
+-              atomic_long_set(&ca->btree_sectors_written, 0);
+-              atomic_long_set(&ca->meta_sectors_written, 0);
+-              atomic_set(&ca->io_count, 0);
+-              atomic_set(&ca->io_errors, 0);
++              atomic_long_set_unchecked(&ca->sectors_written, 0);
++              atomic_long_set_unchecked(&ca->btree_sectors_written, 0);
++              atomic_long_set_unchecked(&ca->meta_sectors_written, 0);
++              atomic_set_unchecked(&ca->io_count, 0);
++              atomic_set_unchecked(&ca->io_errors, 0);
+       }
+       return size;
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index d9fd2a6..749b6c6 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -117,14 +117,16 @@ static void dirty_init(struct keybuf_key *w)
+       bch_bio_map(bio, NULL);
+ }
+-static void dirty_io_destructor(struct closure *cl)
++static void dirty_io_destructor(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       kfree(io);
+ }
+-static void write_dirty_finish(struct closure *cl)
++static void write_dirty_finish(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       struct keybuf_key *w = io->bio.bi_private;
+       struct cached_dev *dc = io->dc;
+@@ -176,8 +178,9 @@ static void dirty_endio(struct bio *bio)
+       closure_put(&io->cl);
+ }
+-static void write_dirty(struct closure *cl)
++static void write_dirty(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       struct keybuf_key *w = io->bio.bi_private;
+@@ -203,8 +206,9 @@ static void read_dirty_endio(struct bio *bio)
+       dirty_endio(bio);
+ }
+-static void read_dirty_submit(struct closure *cl)
++static void read_dirty_submit(struct work_struct *work)
+ {
++      struct closure *cl = container_of(work, struct closure, work);
+       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       closure_bio_submit(&io->bio, cl);
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 13041ee..2d1c184 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1965,7 +1965,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
+                  chunk_kb ? "KB" : "B");
+       if (bitmap->storage.file) {
+               seq_printf(seq, ", file: ");
+-              seq_file_path(seq, bitmap->storage.file, " \t\n");
++              seq_file_path(seq, bitmap->storage.file, " \t\n\\");
+       }
+       seq_printf(seq, "\n");
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 59b2c50..60bca53 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -118,7 +118,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
+  */
+ struct dm_hook_info {
+       bio_end_io_t *bi_end_io;
+-};
++} __no_const;
+ static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
+                       bio_end_io_t *bi_end_io, void *bi_private)
+@@ -182,16 +182,16 @@ struct cache_features {
+ };
+ struct cache_stats {
+-      atomic_t read_hit;
+-      atomic_t read_miss;
+-      atomic_t write_hit;
+-      atomic_t write_miss;
+-      atomic_t demotion;
+-      atomic_t promotion;
+-      atomic_t copies_avoided;
+-      atomic_t cache_cell_clash;
+-      atomic_t commit_count;
+-      atomic_t discard_count;
++      atomic_unchecked_t read_hit;
++      atomic_unchecked_t read_miss;
++      atomic_unchecked_t write_hit;
++      atomic_unchecked_t write_miss;
++      atomic_unchecked_t demotion;
++      atomic_unchecked_t promotion;
++      atomic_unchecked_t copies_avoided;
++      atomic_unchecked_t cache_cell_clash;
++      atomic_unchecked_t commit_count;
++      atomic_unchecked_t discard_count;
+ };
+ /*
+@@ -270,8 +270,8 @@ struct cache {
+       atomic_t nr_io_migrations;
+       wait_queue_head_t quiescing_wait;
+-      atomic_t quiescing;
+-      atomic_t quiescing_ack;
++      atomic_unchecked_t quiescing;
++      atomic_unchecked_t quiescing_ack;
+       /*
+        * cache_size entries, dirty if set
+@@ -395,8 +395,10 @@ static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
+       return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
+ }
+-static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
++static void free_prison_cell(void *_cache, struct dm_bio_prison_cell *cell)
+ {
++      struct cache *cache = _cache;
++
+       dm_bio_prison_free_cell(cache->prison, cell);
+ }
+@@ -493,8 +495,10 @@ static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
+  * You can't have more than two cells in a prealloc struct.  BUG() will be
+  * called if you try and overfill.
+  */
+-static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
++static void prealloc_put_cell(void *_p, struct dm_bio_prison_cell *cell)
+ {
++      struct prealloc *p = _p;
++
+       if (!p->cell2)
+               p->cell2 = cell;
+@@ -637,7 +641,7 @@ static void set_discard(struct cache *cache, dm_dblock_t b)
+       unsigned long flags;
+       BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
+-      atomic_inc(&cache->stats.discard_count);
++      atomic_inc_unchecked(&cache->stats.discard_count);
+       spin_lock_irqsave(&cache->lock, flags);
+       set_bit(from_dblock(b), cache->discard_bitset);
+@@ -685,10 +689,10 @@ static void load_stats(struct cache *cache)
+       struct dm_cache_statistics stats;
+       dm_cache_metadata_get_stats(cache->cmd, &stats);
+-      atomic_set(&cache->stats.read_hit, stats.read_hits);
+-      atomic_set(&cache->stats.read_miss, stats.read_misses);
+-      atomic_set(&cache->stats.write_hit, stats.write_hits);
+-      atomic_set(&cache->stats.write_miss, stats.write_misses);
++      atomic_set_unchecked(&cache->stats.read_hit, stats.read_hits);
++      atomic_set_unchecked(&cache->stats.read_miss, stats.read_misses);
++      atomic_set_unchecked(&cache->stats.write_hit, stats.write_hits);
++      atomic_set_unchecked(&cache->stats.write_miss, stats.write_misses);
+ }
+ static void save_stats(struct cache *cache)
+@@ -698,10 +702,10 @@ static void save_stats(struct cache *cache)
+       if (get_cache_mode(cache) >= CM_READ_ONLY)
+               return;
+-      stats.read_hits = atomic_read(&cache->stats.read_hit);
+-      stats.read_misses = atomic_read(&cache->stats.read_miss);
+-      stats.write_hits = atomic_read(&cache->stats.write_hit);
+-      stats.write_misses = atomic_read(&cache->stats.write_miss);
++      stats.read_hits = atomic_read_unchecked(&cache->stats.read_hit);
++      stats.read_misses = atomic_read_unchecked(&cache->stats.read_miss);
++      stats.write_hits = atomic_read_unchecked(&cache->stats.write_hit);
++      stats.write_misses = atomic_read_unchecked(&cache->stats.write_miss);
+       dm_cache_metadata_set_stats(cache->cmd, &stats);
+ }
+@@ -1326,7 +1330,7 @@ static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+ static void avoid_copy(struct dm_cache_migration *mg)
+ {
+-      atomic_inc(&mg->cache->stats.copies_avoided);
++      atomic_inc_unchecked(&mg->cache->stats.copies_avoided);
+       migration_success_pre_commit(mg);
+ }
+@@ -1636,7 +1640,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
+       cell_prealloc = prealloc_get_cell(structs);
+       r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
+-                           (cell_free_fn) prealloc_put_cell,
++                           prealloc_put_cell,
+                            structs, &new_ocell);
+       if (r > 0)
+               return;
+@@ -1653,13 +1657,13 @@ static bool spare_migration_bandwidth(struct cache *cache)
+ static void inc_hit_counter(struct cache *cache, struct bio *bio)
+ {
+-      atomic_inc(bio_data_dir(bio) == READ ?
++      atomic_inc_unchecked(bio_data_dir(bio) == READ ?
+                  &cache->stats.read_hit : &cache->stats.write_hit);
+ }
+ static void inc_miss_counter(struct cache *cache, struct bio *bio)
+ {
+-      atomic_inc(bio_data_dir(bio) == READ ?
++      atomic_inc_unchecked(bio_data_dir(bio) == READ ?
+                  &cache->stats.read_miss : &cache->stats.write_miss);
+ }
+@@ -1790,7 +1794,7 @@ static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
+       struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
+       return bio_detain(l->cache, b, NULL, cell_prealloc,
+-                        (cell_free_fn) prealloc_put_cell,
++                        prealloc_put_cell,
+                         l->structs, &l->cell);
+ }
+@@ -1832,7 +1836,7 @@ static void process_cell(struct cache *cache, struct prealloc *structs,
+                        */
+                       if (bio_data_dir(bio) == WRITE) {
+-                              atomic_inc(&cache->stats.demotion);
++                              atomic_inc_unchecked(&cache->stats.demotion);
+                               invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
+                               release_cell = false;
+@@ -1865,14 +1869,14 @@ static void process_cell(struct cache *cache, struct prealloc *structs,
+               break;
+       case POLICY_NEW:
+-              atomic_inc(&cache->stats.promotion);
++              atomic_inc_unchecked(&cache->stats.promotion);
+               promote(cache, structs, block, lookup_result.cblock, new_ocell);
+               release_cell = false;
+               break;
+       case POLICY_REPLACE:
+-              atomic_inc(&cache->stats.demotion);
+-              atomic_inc(&cache->stats.promotion);
++              atomic_inc_unchecked(&cache->stats.demotion);
++              atomic_inc_unchecked(&cache->stats.promotion);
+               demote_then_promote(cache, structs, lookup_result.old_oblock,
+                                   block, lookup_result.cblock,
+                                   ool.cell, new_ocell);
+@@ -1902,7 +1906,7 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
+        */
+       cell_prealloc = prealloc_get_cell(structs);
+       r = bio_detain(cache, block, bio, cell_prealloc,
+-                     (cell_free_fn) prealloc_put_cell,
++                     prealloc_put_cell,
+                      structs, &new_ocell);
+       if (r > 0)
+               return;
+@@ -1926,7 +1930,7 @@ static int commit(struct cache *cache, bool clean_shutdown)
+       if (get_cache_mode(cache) >= CM_READ_ONLY)
+               return -EINVAL;
+-      atomic_inc(&cache->stats.commit_count);
++      atomic_inc_unchecked(&cache->stats.commit_count);
+       r = dm_cache_commit(cache->cmd, clean_shutdown);
+       if (r)
+               metadata_operation_failed(cache, "dm_cache_commit", r);
+@@ -2157,32 +2161,32 @@ static void process_invalidation_requests(struct cache *cache)
+  *--------------------------------------------------------------*/
+ static bool is_quiescing(struct cache *cache)
+ {
+-      return atomic_read(&cache->quiescing);
++      return atomic_read_unchecked(&cache->quiescing);
+ }
+ static void ack_quiescing(struct cache *cache)
+ {
+       if (is_quiescing(cache)) {
+-              atomic_inc(&cache->quiescing_ack);
++              atomic_inc_unchecked(&cache->quiescing_ack);
+               wake_up(&cache->quiescing_wait);
+       }
+ }
+ static void wait_for_quiescing_ack(struct cache *cache)
+ {
+-      wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
++      wait_event(cache->quiescing_wait, atomic_read_unchecked(&cache->quiescing_ack));
+ }
+ static void start_quiescing(struct cache *cache)
+ {
+-      atomic_inc(&cache->quiescing);
++      atomic_inc_unchecked(&cache->quiescing);
+       wait_for_quiescing_ack(cache);
+ }
+ static void stop_quiescing(struct cache *cache)
+ {
+-      atomic_set(&cache->quiescing, 0);
+-      atomic_set(&cache->quiescing_ack, 0);
++      atomic_set_unchecked(&cache->quiescing, 0);
++      atomic_set_unchecked(&cache->quiescing_ack, 0);
+ }
+ static void wait_for_migrations(struct cache *cache)
+@@ -2869,8 +2873,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+       init_waitqueue_head(&cache->migration_wait);
+       init_waitqueue_head(&cache->quiescing_wait);
+-      atomic_set(&cache->quiescing, 0);
+-      atomic_set(&cache->quiescing_ack, 0);
++      atomic_set_unchecked(&cache->quiescing, 0);
++      atomic_set_unchecked(&cache->quiescing_ack, 0);
+       r = -ENOMEM;
+       atomic_set(&cache->nr_dirty, 0);
+@@ -2937,12 +2941,12 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+       load_stats(cache);
+-      atomic_set(&cache->stats.demotion, 0);
+-      atomic_set(&cache->stats.promotion, 0);
+-      atomic_set(&cache->stats.copies_avoided, 0);
+-      atomic_set(&cache->stats.cache_cell_clash, 0);
+-      atomic_set(&cache->stats.commit_count, 0);
+-      atomic_set(&cache->stats.discard_count, 0);
++      atomic_set_unchecked(&cache->stats.demotion, 0);
++      atomic_set_unchecked(&cache->stats.promotion, 0);
++      atomic_set_unchecked(&cache->stats.copies_avoided, 0);
++      atomic_set_unchecked(&cache->stats.cache_cell_clash, 0);
++      atomic_set_unchecked(&cache->stats.commit_count, 0);
++      atomic_set_unchecked(&cache->stats.discard_count, 0);
+       spin_lock_init(&cache->invalidation_lock);
+       INIT_LIST_HEAD(&cache->invalidation_requests);
+@@ -3059,7 +3063,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
+       }
+       r = bio_detain(cache, block, bio, cell,
+-                     (cell_free_fn) free_prison_cell,
++                     free_prison_cell,
+                      cache, &cell);
+       if (r) {
+               if (r < 0)
+@@ -3553,12 +3557,12 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+                      cache->sectors_per_block,
+                      (unsigned long long) from_cblock(residency),
+                      (unsigned long long) from_cblock(cache->cache_size),
+-                     (unsigned) atomic_read(&cache->stats.read_hit),
+-                     (unsigned) atomic_read(&cache->stats.read_miss),
+-                     (unsigned) atomic_read(&cache->stats.write_hit),
+-                     (unsigned) atomic_read(&cache->stats.write_miss),
+-                     (unsigned) atomic_read(&cache->stats.demotion),
+-                     (unsigned) atomic_read(&cache->stats.promotion),
++                     (unsigned) atomic_read_unchecked(&cache->stats.read_hit),
++                     (unsigned) atomic_read_unchecked(&cache->stats.read_miss),
++                     (unsigned) atomic_read_unchecked(&cache->stats.write_hit),
++                     (unsigned) atomic_read_unchecked(&cache->stats.write_miss),
++                     (unsigned) atomic_read_unchecked(&cache->stats.demotion),
++                     (unsigned) atomic_read_unchecked(&cache->stats.promotion),
+                      (unsigned long) atomic_read(&cache->nr_dirty));
+               if (writethrough_mode(&cache->features))
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 40ceba1..4141e1e 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -75,8 +75,8 @@ struct mapped_device {
+        * Event handling.
+        */
+       wait_queue_head_t eventq;
+-      atomic_t event_nr;
+-      atomic_t uevent_seq;
++      atomic_unchecked_t event_nr;
++      atomic_unchecked_t uevent_seq;
+       struct list_head uevent_list;
+       spinlock_t uevent_lock; /* Protect access to uevent_list */
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 966eb4b..aca05a3 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1777,7 +1777,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+           cmd == DM_LIST_VERSIONS_CMD)
+               return 0;
+-      if ((cmd == DM_DEV_CREATE_CMD)) {
++      if (cmd == DM_DEV_CREATE_CMD) {
+               if (!*param->name) {
+                       DMWARN("name not supplied when creating device");
+                       return -EINVAL;
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 15db5e9..16fc91b 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -88,7 +88,7 @@ struct multipath {
+       atomic_t nr_valid_paths;        /* Total number of usable paths */
+       atomic_t pg_init_in_progress;   /* Only one pg_init allowed at once */
+-      atomic_t pg_init_count;         /* Number of times pg_init called */
++      atomic_unchecked_t pg_init_count;/* Number of times pg_init called */
+       unsigned queue_mode;
+@@ -203,7 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
+               set_bit(MPATHF_QUEUE_IO, &m->flags);
+               atomic_set(&m->nr_valid_paths, 0);
+               atomic_set(&m->pg_init_in_progress, 0);
+-              atomic_set(&m->pg_init_count, 0);
++              atomic_set_unchecked(&m->pg_init_count, 0);
+               m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+               INIT_WORK(&m->trigger_event, trigger_event);
+               init_waitqueue_head(&m->pg_init_wait);
+@@ -351,7 +351,7 @@ static int __pg_init_all_paths(struct multipath *m)
+       if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+               return 0;
+-      atomic_inc(&m->pg_init_count);
++      atomic_inc_unchecked(&m->pg_init_count);
+       clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+       /* Check here to reset pg_init_required */
+@@ -397,7 +397,7 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
+               clear_bit(MPATHF_QUEUE_IO, &m->flags);
+       }
+-      atomic_set(&m->pg_init_count, 0);
++      atomic_set_unchecked(&m->pg_init_count, 0);
+ }
+ static struct pgpath *choose_path_in_pg(struct multipath *m,
+@@ -1420,7 +1420,7 @@ static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
+       spin_lock_irqsave(&m->lock, flags);
+-      if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
++      if (atomic_read_unchecked(&m->pg_init_count) <= m->pg_init_retries &&
+           !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+               set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+       else
+@@ -1736,7 +1736,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
+       /* Features */
+       if (type == STATUSTYPE_INFO)
+               DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
+-                     atomic_read(&m->pg_init_count));
++                     atomic_read_unchecked(&m->pg_init_count));
+       else {
+               DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
+                             (m->pg_init_retries > 0) * 2 +
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 8abde6b..b9cdbef 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3190,7 +3190,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+                                     mddev->resync_max_sectors : mddev->dev_sectors;
+               progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+               resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
+-                                  atomic64_read(&mddev->resync_mismatches) : 0;
++                                  atomic64_read_unchecked(&mddev->resync_mismatches) : 0;
+               sync_action = decipher_sync_action(&rs->md);
+               /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index bdf1606..443a023 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -42,7 +42,7 @@ enum dm_raid1_error {
+ struct mirror {
+       struct mirror_set *ms;
+-      atomic_t error_count;
++      atomic_unchecked_t error_count;
+       unsigned long error_type;
+       struct dm_dev *dev;
+       sector_t offset;
+@@ -188,7 +188,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
+       struct mirror *m;
+       for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+-              if (!atomic_read(&m->error_count))
++              if (!atomic_read_unchecked(&m->error_count))
+                       return m;
+       return NULL;
+@@ -220,7 +220,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
+        * simple way to tell if a device has encountered
+        * errors.
+        */
+-      atomic_inc(&m->error_count);
++      atomic_inc_unchecked(&m->error_count);
+       if (test_and_set_bit(error_type, &m->error_type))
+               return;
+@@ -379,7 +379,7 @@ static void reset_ms_flags(struct mirror_set *ms)
+       ms->leg_failure = 0;
+       for (m = 0; m < ms->nr_mirrors; m++) {
+-              atomic_set(&(ms->mirror[m].error_count), 0);
++              atomic_set_unchecked(&(ms->mirror[m].error_count), 0);
+               ms->mirror[m].error_type = 0;
+       }
+ }
+@@ -424,7 +424,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
+       struct mirror *m = get_default_mirror(ms);
+       do {
+-              if (likely(!atomic_read(&m->error_count)))
++              if (likely(!atomic_read_unchecked(&m->error_count)))
+                       return m;
+               if (m-- == ms->mirror)
+@@ -438,7 +438,7 @@ static int default_ok(struct mirror *m)
+ {
+       struct mirror *default_mirror = get_default_mirror(m->ms);
+-      return !atomic_read(&default_mirror->error_count);
++      return !atomic_read_unchecked(&default_mirror->error_count);
+ }
+ static int mirror_available(struct mirror_set *ms, struct bio *bio)
+@@ -578,7 +578,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
+                */
+               if (likely(region_in_sync(ms, region, 1)))
+                       m = choose_mirror(ms, bio->bi_iter.bi_sector);
+-              else if (m && atomic_read(&m->error_count))
++              else if (m && atomic_read_unchecked(&m->error_count))
+                       m = NULL;
+               if (likely(m))
+@@ -963,7 +963,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
+       }
+       ms->mirror[mirror].ms = ms;
+-      atomic_set(&(ms->mirror[mirror].error_count), 0);
++      atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
+       ms->mirror[mirror].error_type = 0;
+       ms->mirror[mirror].offset = offset;
+@@ -1388,7 +1388,7 @@ static void mirror_resume(struct dm_target *ti)
+  */
+ static char device_status_char(struct mirror *m)
+ {
+-      if (!atomic_read(&(m->error_count)))
++      if (!atomic_read_unchecked(&(m->error_count)))
+               return 'A';
+       return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 38b05f2..4f99595 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -435,7 +435,7 @@ do_sync_free:
+               synchronize_rcu_expedited();
+               dm_stat_free(&s->rcu_head);
+       } else {
+-              ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
++              ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
+               call_rcu(&s->rcu_head, dm_stat_free);
+       }
+       return 0;
+@@ -647,8 +647,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+                                      ((bi_rw == WRITE) ==
+                                       (ACCESS_ONCE(last->last_rw) == WRITE))
+                                      ));
+-              ACCESS_ONCE(last->last_sector) = end_sector;
+-              ACCESS_ONCE(last->last_rw) = bi_rw;
++              ACCESS_ONCE_RW(last->last_sector) = end_sector;
++              ACCESS_ONCE_RW(last->last_rw) = bi_rw;
+       }
+       rcu_read_lock();
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index 28193a5..0543cc9 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -21,7 +21,7 @@ struct stripe {
+       struct dm_dev *dev;
+       sector_t physical_start;
+-      atomic_t error_count;
++      atomic_unchecked_t error_count;
+ };
+ struct stripe_c {
+@@ -190,7 +190,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+                       kfree(sc);
+                       return r;
+               }
+-              atomic_set(&(sc->stripe[i].error_count), 0);
++              atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
+       }
+       ti->private = sc;
+@@ -357,7 +357,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
+               DMEMIT("%d ", sc->stripes);
+               for (i = 0; i < sc->stripes; i++)  {
+                       DMEMIT("%s ", sc->stripe[i].dev->name);
+-                      buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
++                      buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
+                               'D' : 'A';
+               }
+               buffer[i] = '\0';
+@@ -402,8 +402,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
+        */
+       for (i = 0; i < sc->stripes; i++)
+               if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
+-                      atomic_inc(&(sc->stripe[i].error_count));
+-                      if (atomic_read(&(sc->stripe[i].error_count)) <
++                      atomic_inc_unchecked(&(sc->stripe[i].error_count));
++                      if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
+                           DM_IO_ERROR_THRESHOLD)
+                               schedule_work(&sc->trigger_event);
+               }
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 3e407a9c..5c5cbdb 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -308,7 +308,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+       if (!dev_size)
+               return 0;
+-      if ((start >= dev_size) || (start + len > dev_size)) {
++      if ((start >= dev_size) || (len > dev_size - start)) {
+               DMWARN("%s: %s too small for target: "
+                      "start=%llu, len=%llu, dev_size=%llu",
+                      dm_device_name(ti->table->md), bdevname(bdev, b),
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index a15091a..2d20208 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -405,7 +405,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+ {
+       pmd->info.tm = pmd->tm;
+       pmd->info.levels = 2;
+-      pmd->info.value_type.context = pmd->data_sm;
++      pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+       pmd->info.value_type.size = sizeof(__le64);
+       pmd->info.value_type.inc = data_block_inc;
+       pmd->info.value_type.dec = data_block_dec;
+@@ -424,7 +424,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+       pmd->bl_info.tm = pmd->tm;
+       pmd->bl_info.levels = 1;
+-      pmd->bl_info.value_type.context = pmd->data_sm;
++      pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+       pmd->bl_info.value_type.size = sizeof(__le64);
+       pmd->bl_info.value_type.inc = data_block_inc;
+       pmd->bl_info.value_type.dec = data_block_dec;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 0f2928b..f9c3589 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -569,14 +569,16 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
+  * function to access the md->map field, and make sure they call
+  * dm_put_live_table() when finished.
+  */
+-struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
++struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(&md->io_barrier);
++struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx)
+ {
+       *srcu_idx = srcu_read_lock(&md->io_barrier);
+       return srcu_dereference(md->map, &md->io_barrier);
+ }
+-void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
++void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(&md->io_barrier);
++void dm_put_live_table(struct mapped_device *md, int srcu_idx)
+ {
+       srcu_read_unlock(&md->io_barrier, srcu_idx);
+ }
+@@ -591,13 +593,15 @@ void dm_sync_table(struct mapped_device *md)
+  * A fast alternative to dm_get_live_table/dm_put_live_table.
+  * The caller must not block between these two functions.
+  */
+-static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
++static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU);
++static struct dm_table *dm_get_live_table_fast(struct mapped_device *md)
+ {
+       rcu_read_lock();
+       return rcu_dereference(md->map);
+ }
+-static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
++static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU);
++static void dm_put_live_table_fast(struct mapped_device *md)
+ {
+       rcu_read_unlock();
+ }
+@@ -1484,8 +1488,8 @@ static struct mapped_device *alloc_dev(int minor)
+       spin_lock_init(&md->deferred_lock);
+       atomic_set(&md->holders, 1);
+       atomic_set(&md->open_count, 0);
+-      atomic_set(&md->event_nr, 0);
+-      atomic_set(&md->uevent_seq, 0);
++      atomic_set_unchecked(&md->event_nr, 0);
++      atomic_set_unchecked(&md->uevent_seq, 0);
+       INIT_LIST_HEAD(&md->uevent_list);
+       INIT_LIST_HEAD(&md->table_devices);
+       spin_lock_init(&md->uevent_lock);
+@@ -1624,7 +1628,7 @@ static void event_callback(void *context)
+       dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
+-      atomic_inc(&md->event_nr);
++      atomic_inc_unchecked(&md->event_nr);
+       wake_up(&md->eventq);
+ }
+@@ -2409,18 +2413,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ uint32_t dm_next_uevent_seq(struct mapped_device *md)
+ {
+-      return atomic_add_return(1, &md->uevent_seq);
++      return atomic_add_return_unchecked(1, &md->uevent_seq);
+ }
+ uint32_t dm_get_event_nr(struct mapped_device *md)
+ {
+-      return atomic_read(&md->event_nr);
++      return atomic_read_unchecked(&md->event_nr);
+ }
+ int dm_wait_event(struct mapped_device *md, int event_nr)
+ {
+       return wait_event_interruptible(md->eventq,
+-                      (event_nr != atomic_read(&md->event_nr)));
++                      (event_nr != atomic_read_unchecked(&md->event_nr)));
+ }
+ void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 915e84d..5155da8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -198,10 +198,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
+  *  start build, activate spare
+  */
+ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+-static atomic_t md_event_count;
++static atomic_unchecked_t md_event_count;
+ void md_new_event(struct mddev *mddev)
+ {
+-      atomic_inc(&md_event_count);
++      atomic_inc_unchecked(&md_event_count);
+       wake_up(&md_event_waiters);
+ }
+ EXPORT_SYMBOL_GPL(md_new_event);
+@@ -1434,7 +1434,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+       if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
+           (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
+               rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
+-      atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
++      atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+       rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
+       bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
+@@ -1700,7 +1700,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
+       else
+               sb->resync_offset = cpu_to_le64(0);
+-      sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
++      sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
+       sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+       sb->size = cpu_to_le64(mddev->dev_sectors);
+@@ -2719,7 +2719,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
+ static ssize_t
+ errors_show(struct md_rdev *rdev, char *page)
+ {
+-      return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
++      return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
+ }
+ static ssize_t
+@@ -2731,7 +2731,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
+       rv = kstrtouint(buf, 10, &n);
+       if (rv < 0)
+               return rv;
+-      atomic_set(&rdev->corrected_errors, n);
++      atomic_set_unchecked(&rdev->corrected_errors, n);
+       return len;
+ }
+ static struct rdev_sysfs_entry rdev_errors =
+@@ -3180,8 +3180,8 @@ int md_rdev_init(struct md_rdev *rdev)
+       rdev->sb_loaded = 0;
+       rdev->bb_page = NULL;
+       atomic_set(&rdev->nr_pending, 0);
+-      atomic_set(&rdev->read_errors, 0);
+-      atomic_set(&rdev->corrected_errors, 0);
++      atomic_set_unchecked(&rdev->read_errors, 0);
++      atomic_set_unchecked(&rdev->corrected_errors, 0);
+       INIT_LIST_HEAD(&rdev->same_set);
+       init_waitqueue_head(&rdev->blocked_wait);
+@@ -4403,7 +4403,7 @@ mismatch_cnt_show(struct mddev *mddev, char *page)
+ {
+       return sprintf(page, "%llu\n",
+                      (unsigned long long)
+-                     atomic64_read(&mddev->resync_mismatches));
++                     atomic64_read_unchecked(&mddev->resync_mismatches));
+ }
+ static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
+@@ -5095,7 +5095,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
+       return NULL;
+ }
+-static int add_named_array(const char *val, struct kernel_param *kp)
++static int add_named_array(const char *val, const struct kernel_param *kp)
+ {
+       /* val must be "md_*" where * is not all digits.
+        * We allocate an array with a large free minor number, and
+@@ -5445,7 +5445,7 @@ static void md_clean(struct mddev *mddev)
+       mddev->new_layout = 0;
+       mddev->new_chunk_sectors = 0;
+       mddev->curr_resync = 0;
+-      atomic64_set(&mddev->resync_mismatches, 0);
++      atomic64_set_unchecked(&mddev->resync_mismatches, 0);
+       mddev->suspend_lo = mddev->suspend_hi = 0;
+       mddev->sync_speed_min = mddev->sync_speed_max = 0;
+       mddev->recovery = 0;
+@@ -5862,9 +5862,10 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
+       info.patch_version = MD_PATCHLEVEL_VERSION;
+       info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
+       info.level         = mddev->level;
+-      info.size          = mddev->dev_sectors / 2;
+-      if (info.size != mddev->dev_sectors / 2) /* overflow */
++      if (2 * (sector_t)INT_MAX < mddev->dev_sectors) /* overflow */
+               info.size = -1;
++      else
++              info.size = mddev->dev_sectors / 2;
+       info.nr_disks      = nr;
+       info.raid_disks    = mddev->raid_disks;
+       info.md_minor      = mddev->md_minor;
+@@ -7431,7 +7432,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+               spin_unlock(&pers_lock);
+               seq_printf(seq, "\n");
+-              seq->poll_event = atomic_read(&md_event_count);
++              seq->poll_event = atomic_read_unchecked(&md_event_count);
+               return 0;
+       }
+       if (v == (void*)2) {
+@@ -7531,7 +7532,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
+               return error;
+       seq = file->private_data;
+-      seq->poll_event = atomic_read(&md_event_count);
++      seq->poll_event = atomic_read_unchecked(&md_event_count);
+       return error;
+ }
+@@ -7548,7 +7549,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+       /* always allow read */
+       mask = POLLIN | POLLRDNORM;
+-      if (seq->poll_event != atomic_read(&md_event_count))
++      if (seq->poll_event != atomic_read_unchecked(&md_event_count))
+               mask |= POLLERR | POLLPRI;
+       return mask;
+ }
+@@ -7644,7 +7645,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
+               struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+               curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+                             (int)part_stat_read(&disk->part0, sectors[1]) -
+-                            atomic_read(&disk->sync_io);
++                            atomic_read_unchecked(&disk->sync_io);
+               /* sync IO will cause sync_io to increase before the disk_stats
+                * as sync_io is counted when a request starts, and
+                * disk_stats is counted when it completes.
+@@ -7914,7 +7915,7 @@ void md_do_sync(struct md_thread *thread)
+                * which defaults to physical size, but can be virtual size
+                */
+               max_sectors = mddev->resync_max_sectors;
+-              atomic64_set(&mddev->resync_mismatches, 0);
++              atomic64_set_unchecked(&mddev->resync_mismatches, 0);
+               /* we don't use the checkpoint if there's a bitmap */
+               if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       j = mddev->resync_min;
+@@ -8931,11 +8932,11 @@ static __exit void md_exit(void)
+ subsys_initcall(md_init);
+ module_exit(md_exit)
+-static int get_ro(char *buffer, struct kernel_param *kp)
++static int get_ro(char *buffer, const struct kernel_param *kp)
+ {
+       return sprintf(buffer, "%d", start_readonly);
+ }
+-static int set_ro(const char *val, struct kernel_param *kp)
++static int set_ro(const char *val, const struct kernel_param *kp)
+ {
+       return kstrtouint(val, 10, (unsigned int *)&start_readonly);
+ }
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 20c6675..871764e 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -96,13 +96,13 @@ struct md_rdev {
+                                        * only maintained for arrays that
+                                        * support hot removal
+                                        */
+-      atomic_t        read_errors;    /* number of consecutive read errors that
++      atomic_unchecked_t      read_errors;    /* number of consecutive read errors that
+                                        * we have tried to ignore.
+                                        */
+       time64_t        last_read_error;        /* monotonic time since our
+                                                * last read error
+                                                */
+-      atomic_t        corrected_errors; /* number of corrected read errors,
++      atomic_unchecked_t      corrected_errors; /* number of corrected read errors,
+                                          * for reporting to userspace and storing
+                                          * in superblock.
+                                          */
+@@ -290,7 +290,7 @@ struct mddev {
+       sector_t                        resync_max_sectors; /* may be set by personality */
+-      atomic64_t                      resync_mismatches; /* count of sectors where
++      atomic64_unchecked_t            resync_mismatches; /* count of sectors where
+                                                           * parity/replica mismatch found
+                                                           */
+@@ -469,7 +469,7 @@ extern void mddev_unlock(struct mddev *mddev);
+ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
+ {
+-      atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
++      atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ }
+ struct md_personality
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 7e44005..20e035a 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -700,7 +700,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+        * Flick into a mode where all blocks get allocated in the new area.
+        */
+       smm->begin = old_len;
+-      memcpy(sm, &bootstrap_ops, sizeof(*sm));
++      memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
+       /*
+        * Extend.
+@@ -738,7 +738,7 @@ out:
+       /*
+        * Switch back to normal behaviour.
+        */
+-      memcpy(sm, &ops, sizeof(*sm));
++      memcpy((void *)sm, &ops, sizeof(*sm));
+       return r;
+ }
+diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
+index 3e6d115..ffecdeb 100644
+--- a/drivers/md/persistent-data/dm-space-map.h
++++ b/drivers/md/persistent-data/dm-space-map.h
+@@ -71,6 +71,7 @@ struct dm_space_map {
+                                          dm_sm_threshold_fn fn,
+                                          void *context);
+ };
++typedef struct dm_space_map __no_const dm_space_map_no_const;
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 21dc00e..14b01ea 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1050,7 +1050,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+       struct blk_plug_cb *cb;
+       struct raid1_plug_cb *plug = NULL;
+       int first_clone;
+-      int sectors_handled;
++      sector_t sectors_handled;
+       int max_sectors;
+       sector_t start_next_window;
+@@ -1880,7 +1880,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+                       if (r1_sync_page_io(rdev, sect, s,
+                                           bio->bi_io_vec[idx].bv_page,
+                                           READ) != 0)
+-                              atomic_add(s, &rdev->corrected_errors);
++                              atomic_add_unchecked(s, &rdev->corrected_errors);
+               }
+               sectors -= s;
+               sect += s;
+@@ -1971,7 +1971,7 @@ static void process_checks(struct r1bio *r1_bio)
+               } else
+                       j = 0;
+               if (j >= 0)
+-                      atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
++                      atomic64_add_unchecked(r1_bio->sectors, &mddev->resync_mismatches);
+               if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+                             && !error)) {
+                       /* No need to write to this device. */
+@@ -2122,7 +2122,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+                               rcu_read_unlock();
+                               if (r1_sync_page_io(rdev, sect, s,
+                                                   conf->tmppage, READ)) {
+-                                      atomic_add(s, &rdev->corrected_errors);
++                                      atomic_add_unchecked(s, &rdev->corrected_errors);
+                                       printk(KERN_INFO
+                                              "md/raid1:%s: read error corrected "
+                                              "(%d sectors at %llu on %s)\n",
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index be1a9fc..6694394 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1060,7 +1060,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
+       struct md_rdev *blocked_rdev;
+       struct blk_plug_cb *cb;
+       struct raid10_plug_cb *plug = NULL;
+-      int sectors_handled;
++      sector_t sectors_handled;
+       int max_sectors;
+       int sectors;
+@@ -1438,7 +1438,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+ {
+       struct r10conf *conf = mddev->private;
+       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+-      int chunk_sects = chunk_mask + 1;
++      sector_t chunk_sects = chunk_mask + 1;
+       struct bio *split;
+@@ -1826,7 +1826,7 @@ static void end_sync_read(struct bio *bio)
+               /* The write handler will notice the lack of
+                * R10BIO_Uptodate and record any errors etc
+                */
+-              atomic_add(r10_bio->sectors,
++              atomic_add_unchecked(r10_bio->sectors,
+                          &conf->mirrors[d].rdev->corrected_errors);
+       /* for reconstruct, we always reschedule after a read.
+@@ -1975,7 +1975,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+                       }
+                       if (j == vcnt)
+                               continue;
+-                      atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
++                      atomic64_add_unchecked(r10_bio->sectors, &mddev->resync_mismatches);
+                       if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+                               /* Don't fix anything. */
+                               continue;
+@@ -2174,7 +2174,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+ {
+       long cur_time_mon;
+       unsigned long hours_since_last;
+-      unsigned int read_errors = atomic_read(&rdev->read_errors);
++      unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
+       cur_time_mon = ktime_get_seconds();
+@@ -2195,9 +2195,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+        * overflowing the shift of read_errors by hours_since_last.
+        */
+       if (hours_since_last >= 8 * sizeof(read_errors))
+-              atomic_set(&rdev->read_errors, 0);
++              atomic_set_unchecked(&rdev->read_errors, 0);
+       else
+-              atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
++              atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
+ }
+ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
+@@ -2251,8 +2251,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+               return;
+       check_decay_read_errors(mddev, rdev);
+-      atomic_inc(&rdev->read_errors);
+-      if (atomic_read(&rdev->read_errors) > max_read_errors) {
++      atomic_inc_unchecked(&rdev->read_errors);
++      if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
+               char b[BDEVNAME_SIZE];
+               bdevname(rdev->bdev, b);
+@@ -2260,7 +2260,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+                      "md/raid10:%s: %s: Raid device exceeded "
+                      "read_error threshold [cur %d:max %d]\n",
+                      mdname(mddev), b,
+-                     atomic_read(&rdev->read_errors), max_read_errors);
++                     atomic_read_unchecked(&rdev->read_errors), max_read_errors);
+               printk(KERN_NOTICE
+                      "md/raid10:%s: %s: Failing raid device\n",
+                      mdname(mddev), b);
+@@ -2417,7 +2417,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+                                              sect +
+                                              choose_data_offset(r10_bio, rdev)),
+                                      bdevname(rdev->bdev, b));
+-                              atomic_add(s, &rdev->corrected_errors);
++                              atomic_add_unchecked(s, &rdev->corrected_errors);
+                       }
+                       rdev_dec_pending(rdev, mddev);
+@@ -3188,6 +3188,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+       } else {
+               /* resync. Schedule a read for every block at this virt offset */
+               int count = 0;
++              sector_t sectors;
+               bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
+@@ -3213,7 +3214,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+               r10_bio->sector = sector_nr;
+               set_bit(R10BIO_IsSync, &r10_bio->state);
+               raid10_find_phys(conf, r10_bio);
+-              r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
++              sectors = (sector_nr | chunk_mask) - sector_nr + 1;
++              r10_bio->sectors = sectors;
+               for (i = 0; i < conf->copies; i++) {
+                       int d = r10_bio->devs[i].devnum;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index ee7fc37..d7efe3d 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1120,23 +1120,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
+       struct bio_vec bvl;
+       struct bvec_iter iter;
+       struct page *bio_page;
+-      int page_offset;
++      s64 page_offset;
+       struct async_submit_ctl submit;
+       enum async_tx_flags flags = 0;
+       if (bio->bi_iter.bi_sector >= sector)
+-              page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
++              page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
+       else
+-              page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
++              page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
+       if (frombio)
+               flags |= ASYNC_TX_FENCE;
+       init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
+       bio_for_each_segment(bvl, bio, iter) {
+-              int len = bvl.bv_len;
+-              int clen;
+-              int b_offset = 0;
++              s64 len = bvl.bv_len;
++              s64 clen;
++              s64 b_offset = 0;
+               if (page_offset < 0) {
+                       b_offset = -page_offset;
+@@ -2040,6 +2040,10 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
+       return 1;
+ }
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
++#endif
++
+ static int grow_stripes(struct r5conf *conf, int num)
+ {
+       struct kmem_cache *sc;
+@@ -2050,7 +2054,11 @@ static int grow_stripes(struct r5conf *conf, int num)
+                       "raid%d-%s", conf->level, mdname(conf->mddev));
+       else
+               sprintf(conf->cache_name[0],
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                      "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
++#else
+                       "raid%d-%p", conf->level, conf->mddev);
++#endif
+       sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+       conf->active_name = 0;
+@@ -2354,21 +2362,21 @@ static void raid5_end_read_request(struct bio * bi)
+                               mdname(conf->mddev), STRIPE_SECTORS,
+                               (unsigned long long)s,
+                               bdevname(rdev->bdev, b));
+-                      atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
++                      atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
+                       clear_bit(R5_ReadError, &sh->dev[i].flags);
+                       clear_bit(R5_ReWrite, &sh->dev[i].flags);
+               } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+                       clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+-              if (atomic_read(&rdev->read_errors))
+-                      atomic_set(&rdev->read_errors, 0);
++              if (atomic_read_unchecked(&rdev->read_errors))
++                      atomic_set_unchecked(&rdev->read_errors, 0);
+       } else {
+               const char *bdn = bdevname(rdev->bdev, b);
+               int retry = 0;
+               int set_bad = 0;
+               clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+-              atomic_inc(&rdev->read_errors);
++              atomic_inc_unchecked(&rdev->read_errors);
+               if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+                       printk_ratelimited(
+                               KERN_WARNING
+@@ -2396,7 +2404,7 @@ static void raid5_end_read_request(struct bio * bi)
+                               mdname(conf->mddev),
+                               (unsigned long long)s,
+                               bdn);
+-              } else if (atomic_read(&rdev->read_errors)
++              } else if (atomic_read_unchecked(&rdev->read_errors)
+                        > conf->max_nr_stripes)
+                       printk(KERN_WARNING
+                              "md/raid:%s: Too many read errors, failing device %s.\n",
+@@ -3763,7 +3771,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
+                        */
+                       set_bit(STRIPE_INSYNC, &sh->state);
+               else {
+-                      atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++                      atomic64_add_unchecked(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                               /* don't try to repair!! */
+                               set_bit(STRIPE_INSYNC, &sh->state);
+@@ -3915,7 +3923,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+                                */
+                       }
+               } else {
+-                      atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++                      atomic64_add_unchecked(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                               /* don't try to repair!! */
+                               set_bit(STRIPE_INSYNC, &sh->state);
+diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
+index 9914f69..177e48b 100644
+--- a/drivers/media/dvb-core/dvb_net.c
++++ b/drivers/media/dvb-core/dvb_net.c
+@@ -882,7 +882,7 @@ static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
+       return 0;
+ }
+-static int dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 75a3f4b..06b70a3 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -428,7 +428,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+                       int demux_sink_pads)
+ {
+       struct dvb_device *dvbdev;
+-      struct file_operations *dvbdevfops;
++      file_operations_no_const *dvbdevfops;
+       struct device *clsdev;
+       int minor;
+       int id, ret;
+diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
+index 6ad22b6..6e90e2a 100644
+--- a/drivers/media/dvb-frontends/af9033.h
++++ b/drivers/media/dvb-frontends/af9033.h
+@@ -96,6 +96,6 @@ struct af9033_ops {
+       int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
+       int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
+                         int onoff);
+-};
++} __no_const;
+ #endif /* AF9033_H */
+diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
+index 8814f36..2adf845 100644
+--- a/drivers/media/dvb-frontends/cx24116.c
++++ b/drivers/media/dvb-frontends/cx24116.c
+@@ -1462,7 +1462,7 @@ static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
+       return cx24116_read_status(fe, status);
+ }
+-static int cx24116_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
+index a3f7eb4..6103a23 100644
+--- a/drivers/media/dvb-frontends/cx24117.c
++++ b/drivers/media/dvb-frontends/cx24117.c
+@@ -1555,7 +1555,7 @@ static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
+       return cx24117_read_status(fe, status);
+ }
+-static int cx24117_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
+index 066ee38..193d707 100644
+--- a/drivers/media/dvb-frontends/cx24120.c
++++ b/drivers/media/dvb-frontends/cx24120.c
+@@ -1492,7 +1492,7 @@ static int cx24120_tune(struct dvb_frontend *fe, bool re_tune,
+       return cx24120_read_status(fe, status);
+ }
+-static int cx24120_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
+index 113b094..c9424e6 100644
+--- a/drivers/media/dvb-frontends/cx24123.c
++++ b/drivers/media/dvb-frontends/cx24123.c
+@@ -1009,7 +1009,7 @@ static int cx24123_tune(struct dvb_frontend *fe,
+       return retval;
+ }
+-static int cx24123_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
+index 314d3b8..e2257bd 100644
+--- a/drivers/media/dvb-frontends/cxd2820r_core.c
++++ b/drivers/media/dvb-frontends/cxd2820r_core.c
+@@ -572,7 +572,7 @@ error:
+       return DVBFE_ALGO_SEARCH_ERROR;
+ }
+-static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_CUSTOM;
+ }
+diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
+index d5dfafb..b7ed9d9 100644
+--- a/drivers/media/dvb-frontends/dib3000.h
++++ b/drivers/media/dvb-frontends/dib3000.h
+@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
+       int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
+       int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
+       int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
+-};
++} __no_const;
+ #if IS_REACHABLE(CONFIG_DVB_DIB3000MB)
+ extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
+diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
+index baa2789..c8de7fe 100644
+--- a/drivers/media/dvb-frontends/dib7000p.h
++++ b/drivers/media/dvb-frontends/dib7000p.h
+@@ -64,7 +64,7 @@ struct dib7000p_ops {
+       int (*get_adc_power)(struct dvb_frontend *fe);
+       int (*slave_reset)(struct dvb_frontend *fe);
+       struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
+-};
++} __no_const;
+ #if IS_REACHABLE(CONFIG_DVB_DIB7000P)
+ void *dib7000p_attach(struct dib7000p_ops *ops);
+diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
+index 2b8b4b1..8cef451 100644
+--- a/drivers/media/dvb-frontends/dib8000.h
++++ b/drivers/media/dvb-frontends/dib8000.h
+@@ -61,7 +61,7 @@ struct dib8000_ops {
+       int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
+       int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
+       struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
+-};
++} __no_const;
+ #if IS_REACHABLE(CONFIG_DVB_DIB8000)
+ void *dib8000_attach(struct dib8000_ops *ops);
+diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
+index 1c7eb47..c1cd6b8 100644
+--- a/drivers/media/dvb-frontends/hd29l2.c
++++ b/drivers/media/dvb-frontends/hd29l2.c
+@@ -555,7 +555,7 @@ err:
+       return DVBFE_ALGO_SEARCH_ERROR;
+ }
+-static int hd29l2_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo hd29l2_get_frontend_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_CUSTOM;
+ }
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 179c26e..af482fe 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -1734,7 +1734,7 @@ static int lgdt3306a_get_tune_settings(struct dvb_frontend *fe,
+       return 0;
+ }
+-static int lgdt3306a_search(struct dvb_frontend *fe)
++static enum dvbfe_search lgdt3306a_search(struct dvb_frontend *fe)
+ {
+       enum fe_status status = 0;
+       int i, ret;
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
+index fe79358..6b9c499 100644
+--- a/drivers/media/dvb-frontends/mb86a20s.c
++++ b/drivers/media/dvb-frontends/mb86a20s.c
+@@ -2054,7 +2054,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
+       kfree(state);
+ }
+-static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+ {
+         return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
+index fc08429..c816697 100644
+--- a/drivers/media/dvb-frontends/mt312.c
++++ b/drivers/media/dvb-frontends/mt312.c
+@@ -381,7 +381,7 @@ static int mt312_send_master_cmd(struct dvb_frontend *fe,
+ }
+ static int mt312_send_burst(struct dvb_frontend *fe,
+-                          const enum fe_sec_mini_cmd c)
++                          enum fe_sec_mini_cmd c)
+ {
+       struct mt312_state *state = fe->demodulator_priv;
+       const u8 mini_tab[2] = { 0x02, 0x03 };
+@@ -405,7 +405,7 @@ static int mt312_send_burst(struct dvb_frontend *fe,
+ }
+ static int mt312_set_tone(struct dvb_frontend *fe,
+-                        const enum fe_sec_tone_mode t)
++                        enum fe_sec_tone_mode t)
+ {
+       struct mt312_state *state = fe->demodulator_priv;
+       const u8 tone_tab[2] = { 0x01, 0x00 };
+@@ -429,7 +429,7 @@ static int mt312_set_tone(struct dvb_frontend *fe,
+ }
+ static int mt312_set_voltage(struct dvb_frontend *fe,
+-                           const enum fe_sec_voltage v)
++                           enum fe_sec_voltage v)
+ {
+       struct mt312_state *state = fe->demodulator_priv;
+       const u8 volt_tab[3] = { 0x00, 0x40, 0x00 };
+diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
+index b5e3d90..bd00dc6 100644
+--- a/drivers/media/dvb-frontends/s921.c
++++ b/drivers/media/dvb-frontends/s921.c
+@@ -464,7 +464,7 @@ static int s921_tune(struct dvb_frontend *fe,
+       return rc;
+ }
+-static int s921_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
+index 35bc9b2..d5072b1 100644
+--- a/drivers/media/pci/bt8xx/dst.c
++++ b/drivers/media/pci/bt8xx/dst.c
+@@ -1683,7 +1683,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
+       return 0;
+ }
+-static int dst_get_tuning_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
+ {
+       return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
+ }
+diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
+index 5dc1e3f..ed6db07 100644
+--- a/drivers/media/pci/cx88/cx88-video.c
++++ b/drivers/media/pci/cx88/cx88-video.c
+@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
+ /* ------------------------------------------------------------------ */
+-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+-static unsigned int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+ module_param_array(video_nr, int, NULL, 0444);
+ module_param_array(vbi_nr,   int, NULL, 0444);
+diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
+index 374033a..461c38c 100644
+--- a/drivers/media/pci/ivtv/ivtv-driver.c
++++ b/drivers/media/pci/ivtv/ivtv-driver.c
+@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
+ MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
+ /* ivtv instance counter */
+-static atomic_t ivtv_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
+ /* Parameter declarations */
+ static int cardtype[IVTV_MAX_CARDS];
+diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
+index d0e70dc0..e4fee68 100644
+--- a/drivers/media/pci/pt1/va1j5jf8007s.c
++++ b/drivers/media/pci/pt1/va1j5jf8007s.c
+@@ -102,7 +102,7 @@ static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr)
+       return 0;
+ }
+-static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
+index 0268f20..de9dff7 100644
+--- a/drivers/media/pci/pt1/va1j5jf8007t.c
++++ b/drivers/media/pci/pt1/va1j5jf8007t.c
+@@ -92,7 +92,7 @@ static int va1j5jf8007t_read_snr(struct dvb_frontend *fe, u16 *snr)
+       return 0;
+ }
+-static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
+ {
+       return DVBFE_ALGO_HW;
+ }
+diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
+index f50d072..0214f25 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-core.c
++++ b/drivers/media/pci/solo6x10/solo6x10-core.c
+@@ -411,7 +411,7 @@ static void solo_device_release(struct device *dev)
+ static int solo_sysfs_init(struct solo_dev *solo_dev)
+ {
+-      struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
++      bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
+       struct device *dev = &solo_dev->dev;
+       const char *driver;
+       int i;
+diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
+index 4a37a1c..7e82dfd 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
++++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
+@@ -350,7 +350,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
+ int solo_g723_init(struct solo_dev *solo_dev)
+ {
+-      static struct snd_device_ops ops = { NULL };
++      static struct snd_device_ops ops = { };
+       struct snd_card *card;
+       struct snd_kcontrol_new kctl;
+       char name[32];
+diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
+index 8c84846..27b4f83 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
++++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
+@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
+       /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
+       if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
+-              p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
++              p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
+               if (p2m_id < 0)
+                       p2m_id = -p2m_id;
+       }
+diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
+index 5bd4987..bfcdd17 100644
+--- a/drivers/media/pci/solo6x10/solo6x10.h
++++ b/drivers/media/pci/solo6x10/solo6x10.h
+@@ -216,7 +216,7 @@ struct solo_dev {
+       /* P2M DMA Engine */
+       struct solo_p2m_dev     p2m_dev[SOLO_NR_P2M];
+-      atomic_t                p2m_count;
++      atomic_unchecked_t      p2m_count;
+       int                     p2m_jiffies;
+       unsigned int            p2m_timeouts;
+diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
+index aeb2b4e..53420d1 100644
+--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
++++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
+@@ -775,8 +775,9 @@ static struct video_device video_dev_template = {
+  *
+  * IRQ_HANDLED, interrupt done.
+  */
+-static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
++static irqreturn_t vip_irq(int irq, void *_vip)
+ {
++      struct sta2x11_vip *vip = _vip;
+       unsigned int status;
+       status = reg_read(vip, DVP_ITS);
+@@ -1058,7 +1059,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
+       spin_lock_init(&vip->slock);
+       ret = request_irq(pdev->irq,
+-                        (irq_handler_t) vip_irq,
++                        vip_irq,
+                         IRQF_SHARED, KBUILD_MODNAME, vip);
+       if (ret) {
+               dev_err(&pdev->dev, "request_irq failed\n");
+diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
+index 8474528..6c4e442 100644
+--- a/drivers/media/pci/tw68/tw68-core.c
++++ b/drivers/media/pci/tw68/tw68-core.c
+@@ -61,7 +61,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
+ module_param_array(card, int, NULL, 0444);
+ MODULE_PARM_DESC(card, "card type");
+-static atomic_t tw68_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
+ /* ------------------------------------------------------------------ */
+diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
+index 71a0453..279d447 100644
+--- a/drivers/media/pci/tw686x/tw686x-core.c
++++ b/drivers/media/pci/tw686x/tw686x-core.c
+@@ -72,12 +72,12 @@ static const char *dma_mode_name(unsigned int mode)
+       }
+ }
+-static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp)
++static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp)
+ {
+       return sprintf(buffer, dma_mode_name(dma_mode));
+ }
+-static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp)
++static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp)
+ {
+       if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY)))
+               dma_mode = TW686X_DMA_MODE_MEMCPY;
+diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
+index 4e7db89..bd7ef95 100644
+--- a/drivers/media/pci/zoran/zoran.h
++++ b/drivers/media/pci/zoran/zoran.h
+@@ -178,7 +178,6 @@ struct zoran_fh;
+ struct zoran_mapping {
+       struct zoran_fh *fh;
+-      atomic_t count;
+ };
+ struct zoran_buffer {
+diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
+index 9d2697f..65fb18f 100644
+--- a/drivers/media/pci/zoran/zoran_card.c
++++ b/drivers/media/pci/zoran/zoran_card.c
+@@ -1356,7 +1356,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+       if (zr->card.video_codec) {
+               codec_name = codecid_to_modulename(zr->card.video_codec);
+               if (codec_name) {
+-                      result = request_module(codec_name);
++                      result = request_module("%s", codec_name);
+                       if (result) {
+                               dprintk(1,
+                                       KERN_ERR
+@@ -1368,7 +1368,7 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+       if (zr->card.video_vfe) {
+               vfe_name = codecid_to_modulename(zr->card.video_vfe);
+               if (vfe_name) {
+-                      result = request_module(vfe_name);
++                      result = request_module("%s", vfe_name);
+                       if (result < 0) {
+                               dprintk(1,
+                                       KERN_ERR
+diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
+index 80caa70..d076ecf 100644
+--- a/drivers/media/pci/zoran/zoran_driver.c
++++ b/drivers/media/pci/zoran/zoran_driver.c
+@@ -2607,8 +2607,6 @@ zoran_poll (struct file *file,
+ static void
+ zoran_vm_open (struct vm_area_struct *vma)
+ {
+-      struct zoran_mapping *map = vma->vm_private_data;
+-      atomic_inc(&map->count);
+ }
+ static void
+@@ -2736,7 +2734,6 @@ zoran_mmap (struct file           *file,
+               return res;
+       }
+       map->fh = fh;
+-      atomic_set(&map->count, 1);
+       vma->vm_ops = &zoran_vm_ops;
+       vma->vm_flags |= VM_DONTEXPAND;
+diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+index 63d4be4..451b8e1 100644
+--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
++++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+@@ -665,10 +665,10 @@ static int h264_enc_deinit(unsigned long handle)
+ }
+ static struct venc_common_if venc_h264_if = {
+-      h264_enc_init,
+-      h264_enc_encode,
+-      h264_enc_set_param,
+-      h264_enc_deinit,
++      .init = h264_enc_init,
++      .encode = h264_enc_encode,
++      .set_param = h264_enc_set_param,
++      .deinit = h264_enc_deinit,
+ };
+ struct venc_common_if *get_h264_enc_comm_if(void);
+diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+index 6d97584..8539e9b 100644
+--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
++++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+@@ -470,10 +470,10 @@ static int vp8_enc_deinit(unsigned long handle)
+ }
+ static struct venc_common_if venc_vp8_if = {
+-      vp8_enc_init,
+-      vp8_enc_encode,
+-      vp8_enc_set_param,
+-      vp8_enc_deinit,
++      .init = vp8_enc_init,
++      .encode = vp8_enc_encode,
++      .set_param = vp8_enc_set_param,
++      .deinit = vp8_enc_deinit,
+ };
+ struct venc_common_if *get_vp8_enc_comm_if(void);
+diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
+index 6b01e12..0f35c56 100644
+--- a/drivers/media/platform/omap/omap_vout.c
++++ b/drivers/media/platform/omap/omap_vout.c
+@@ -63,7 +63,6 @@ enum omap_vout_channels {
+       OMAP_VIDEO2,
+ };
+-static struct videobuf_queue_ops video_vbq_ops;
+ /* Variables configurable through module params*/
+ static u32 video1_numbuffers = 3;
+ static u32 video2_numbuffers = 3;
+@@ -1001,6 +1000,12 @@ static int omap_vout_open(struct file *file)
+ {
+       struct videobuf_queue *q;
+       struct omap_vout_device *vout = NULL;
++      static struct videobuf_queue_ops video_vbq_ops = {
++              .buf_setup = omap_vout_buffer_setup,
++              .buf_prepare = omap_vout_buffer_prepare,
++              .buf_release = omap_vout_buffer_release,
++              .buf_queue = omap_vout_buffer_queue,
++      };
+       vout = video_drvdata(file);
+       v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+@@ -1018,10 +1023,6 @@ static int omap_vout_open(struct file *file)
+       vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+       q = &vout->vbq;
+-      video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+-      video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+-      video_vbq_ops.buf_release = omap_vout_buffer_release;
+-      video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+       spin_lock_init(&vout->vbq_lock);
+       videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
+index 869f0ce..c9c6e9e 100644
+--- a/drivers/media/platform/s5p-tv/mixer.h
++++ b/drivers/media/platform/s5p-tv/mixer.h
+@@ -156,7 +156,7 @@ struct mxr_layer {
+       /** layer index (unique identifier) */
+       int idx;
+       /** callbacks for layer methods */
+-      struct mxr_layer_ops ops;
++      struct mxr_layer_ops *ops;
+       /** format array */
+       const struct mxr_format **fmt_array;
+       /** size of format array */
+diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+index d4d2564..f4570ea 100644
+--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
++++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
+ {
+       struct mxr_layer *layer;
+       int ret;
+-      const struct mxr_layer_ops ops = {
++      static const struct mxr_layer_ops ops = {
+               .release = mxr_graph_layer_release,
+               .buffer_set = mxr_graph_buffer_set,
+               .stream_set = mxr_graph_stream_set,
+diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
+index a0ec14a..225f4ac 100644
+--- a/drivers/media/platform/s5p-tv/mixer_reg.c
++++ b/drivers/media/platform/s5p-tv/mixer_reg.c
+@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
+               layer->update_buf = next;
+       }
+-      layer->ops.buffer_set(layer, layer->update_buf);
++      layer->ops->buffer_set(layer, layer->update_buf);
+       if (done && done != layer->shadow_buf)
+               vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
+diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
+index ee74e2b..9d2dae9 100644
+--- a/drivers/media/platform/s5p-tv/mixer_video.c
++++ b/drivers/media/platform/s5p-tv/mixer_video.c
+@@ -201,7 +201,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
+       layer->geo.src.height = layer->geo.src.full_height;
+       mxr_geometry_dump(mdev, &layer->geo);
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+ }
+@@ -219,7 +219,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
+       layer->geo.dst.full_width = mbus_fmt.width;
+       layer->geo.dst.full_height = mbus_fmt.height;
+       layer->geo.dst.field = mbus_fmt.field;
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+ }
+@@ -325,7 +325,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
+       /* set source size to highest accepted value */
+       geo->src.full_width = max(geo->dst.full_width, pix->width);
+       geo->src.full_height = max(geo->dst.full_height, pix->height);
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+       /* set cropping to total visible screen */
+       geo->src.width = pix->width;
+@@ -333,12 +333,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
+       geo->src.x_offset = 0;
+       geo->src.y_offset = 0;
+       /* assure consistency of geometry */
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
+       mxr_geometry_dump(mdev, &layer->geo);
+       /* set full size to lowest possible value */
+       geo->src.full_width = 0;
+       geo->src.full_height = 0;
+-      layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
++      layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+       mxr_geometry_dump(mdev, &layer->geo);
+       /* returning results */
+@@ -465,7 +465,7 @@ static int mxr_s_selection(struct file *file, void *fh,
+               target->width = s->r.width;
+               target->height = s->r.height;
+-              layer->ops.fix_geometry(layer, stage, s->flags);
++              layer->ops->fix_geometry(layer, stage, s->flags);
+               /* retrieve update selection rectangle */
+               res.left = target->x_offset;
+@@ -929,13 +929,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
+       mxr_output_get(mdev);
+       mxr_layer_update_output(layer);
+-      layer->ops.format_set(layer);
++      layer->ops->format_set(layer);
+       /* enabling layer in hardware */
+       spin_lock_irqsave(&layer->enq_slock, flags);
+       layer->state = MXR_LAYER_STREAMING;
+       spin_unlock_irqrestore(&layer->enq_slock, flags);
+-      layer->ops.stream_set(layer, MXR_ENABLE);
++      layer->ops->stream_set(layer, MXR_ENABLE);
+       mxr_streamer_get(mdev);
+       return 0;
+@@ -1007,7 +1007,7 @@ static void stop_streaming(struct vb2_queue *vq)
+       spin_unlock_irqrestore(&layer->enq_slock, flags);
+       /* disabling layer in hardware */
+-      layer->ops.stream_set(layer, MXR_DISABLE);
++      layer->ops->stream_set(layer, MXR_DISABLE);
+       /* remove one streamer */
+       mxr_streamer_put(mdev);
+       /* allow changes in output configuration */
+@@ -1045,8 +1045,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
+ void mxr_layer_release(struct mxr_layer *layer)
+ {
+-      if (layer->ops.release)
+-              layer->ops.release(layer);
++      if (layer->ops->release)
++              layer->ops->release(layer);
+ }
+ void mxr_base_layer_release(struct mxr_layer *layer)
+@@ -1072,7 +1072,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+       layer->mdev = mdev;
+       layer->idx = idx;
+-      layer->ops = *ops;
++      layer->ops = ops;
+       spin_lock_init(&layer->enq_slock);
+       INIT_LIST_HEAD(&layer->enq_list);
+diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+index 6fa6f67..04b574b 100644
+--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
++++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+@@ -207,7 +207,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
+ {
+       struct mxr_layer *layer;
+       int ret;
+-      const struct mxr_layer_ops ops = {
++      static const struct mxr_layer_ops ops = {
+               .release = mxr_vp_layer_release,
+               .buffer_set = mxr_vp_buffer_set,
+               .stream_set = mxr_vp_stream_set,
+diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
+index 46c7186..47130c8 100644
+--- a/drivers/media/platform/soc_camera/soc_camera.c
++++ b/drivers/media/platform/soc_camera/soc_camera.c
+@@ -1791,7 +1791,7 @@ static int soc_camera_probe(struct soc_camera_host *ici,
+                       goto eadd;
+               if (shd->module_name)
+-                      ret = request_module(shd->module_name);
++                      ret = request_module("%s", shd->module_name);
+               ret = shd->add_device(icd);
+               if (ret < 0)
+diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig
+index 7420a50..e6f31a0 100644
+--- a/drivers/media/platform/sti/c8sectpfe/Kconfig
++++ b/drivers/media/platform/sti/c8sectpfe/Kconfig
+@@ -4,6 +4,7 @@ config DVB_C8SECTPFE
+       depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
+       select FW_LOADER
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+       select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
+       select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
+index 82affae..42833ec 100644
+--- a/drivers/media/radio/radio-cadet.c
++++ b/drivers/media/radio/radio-cadet.c
+@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
+       unsigned char readbuf[RDS_BUFFER];
+       int i = 0;
++      if (count > RDS_BUFFER)
++              return -EFAULT;
+       mutex_lock(&dev->lock);
+       if (dev->rdsstat == 0)
+               cadet_start_rds(dev);
+@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
+               readbuf[i++] = dev->rdsbuf[dev->rdsout++];
+       mutex_unlock(&dev->lock);
+-      if (i && copy_to_user(data, readbuf, i))
+-              return -EFAULT;
++      if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
++              i = -EFAULT;
++
+       return i;
+ }
+diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
+index 8253f79..ca5f579 100644
+--- a/drivers/media/radio/radio-maxiradio.c
++++ b/drivers/media/radio/radio-maxiradio.c
+@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
+ /* TEA5757 pin mappings */
+ static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
+-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
+ #define PCI_VENDOR_ID_GUILLEMOT 0x5046
+ #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
+diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
+index 85667a9..ec4dc0a 100644
+--- a/drivers/media/radio/radio-shark.c
++++ b/drivers/media/radio/radio-shark.c
+@@ -79,7 +79,7 @@ struct shark_device {
+       u32 last_val;
+ };
+-static atomic_t shark_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
+ static void shark_write_val(struct snd_tea575x *tea, u32 val)
+ {
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index 0e65a85..3fa6f5c 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -74,7 +74,7 @@ struct shark_device {
+       u8 *transfer_buffer;
+ };
+-static atomic_t shark_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
+ static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
+ {
+diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
+index 271f725..35e8c8f 100644
+--- a/drivers/media/radio/radio-si476x.c
++++ b/drivers/media/radio/radio-si476x.c
+@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
+       struct si476x_radio *radio;
+       struct v4l2_ctrl *ctrl;
+-      static atomic_t instance = ATOMIC_INIT(0);
++      static atomic_unchecked_t instance = ATOMIC_INIT(0);
+       radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
+       if (!radio)
+diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
+index 642b89c..5e92dc3 100644
+--- a/drivers/media/radio/wl128x/fmdrv_common.c
++++ b/drivers/media/radio/wl128x/fmdrv_common.c
+@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
+ MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
+ /* Radio Nr */
+-static u32 radio_nr = -1;
++static int radio_nr = -1;
+ module_param(radio_nr, int, 0444);
+ MODULE_PARM_DESC(radio_nr, "Radio Nr");
+diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
+index 9fd1527..8927230 100644
+--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
++++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
+@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
+ static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
+ {
+-      char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
+-      char result[64];
+-      return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
+-                              sizeof(result), 0);
++      char *buf;
++      char *result;
++      int retval;
++
++      buf = kmalloc(2, GFP_KERNEL);
++      if (buf == NULL)
++              return -ENOMEM;
++      result = kmalloc(64, GFP_KERNEL);
++      if (result == NULL) {
++              kfree(buf);
++              return -ENOMEM;
++      }
++
++      buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
++      buf[1] = enable ? 1 : 0;
++
++      retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
++
++      kfree(buf);
++      kfree(result);
++      return retval;
+ }
+ static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
+ {
+-      char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
+-      char state[3];
+-      return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
++      char *buf;
++      char *state;
++      int retval;
++
++      buf = kmalloc(2, GFP_KERNEL);
++      if (buf == NULL)
++              return -ENOMEM;
++      state = kmalloc(3, GFP_KERNEL);
++      if (state == NULL) {
++              kfree(buf);
++              return -ENOMEM;
++      }
++
++      buf[0] = CINERGYT2_EP1_SLEEP_MODE;
++      buf[1] = enable ? 1 : 0;
++
++      retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
++
++      kfree(buf);
++      kfree(state);
++      return retval;
+ }
+ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
+ {
+-      char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
+-      char state[3];
++      char *query;
++      char *state;
+       int ret;
++      query = kmalloc(1, GFP_KERNEL);
++      if (query == NULL)
++              return -ENOMEM;
++      state = kmalloc(3, GFP_KERNEL);
++      if (state == NULL) {
++              kfree(query);
++              return -ENOMEM;
++      }
++
++      query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
+       adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
+-      ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
+-                              sizeof(state), 0);
++      ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
+       if (ret < 0) {
+               deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
+                       "state info\n");
+@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
+       /* Copy this pointer as we are gonna need it in the release phase */
+       cinergyt2_usb_device = adap->dev;
+-
++      kfree(query);
++      kfree(state);
+       return 0;
+ }
+@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
+ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+ {
+       struct cinergyt2_state *st = d->priv;
+-      u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
++      u8 *key, *cmd;
+       int i;
++      cmd = kmalloc(1, GFP_KERNEL);
++      if (cmd == NULL)
++              return -EINVAL;
++      key = kzalloc(5, GFP_KERNEL);
++      if (key == NULL) {
++              kfree(cmd);
++              return -EINVAL;
++      }
++
++      cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
++
+       *state = REMOTE_NO_KEY_PRESSED;
+-      dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
++      dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
+       if (key[4] == 0xff) {
+               /* key repeat */
+               st->rc_counter++;
+@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+                                       *event = d->last_event;
+                                       deb_rc("repeat key, event %x\n",
+                                                  *event);
+-                                      return 0;
++                                      goto out;
+                               }
+                       }
+                       deb_rc("repeated key (non repeatable)\n");
+               }
+-              return 0;
++              goto out;
+       }
+       /* hack to pass checksum on the custom field */
+@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+               deb_rc("key: %*ph\n", 5, key);
+       }
++out:
++      kfree(cmd);
++      kfree(key);
+       return 0;
+ }
+diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+index b3ec743..9c0e418 100644
+--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
++++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
+                                   enum fe_status *status)
+ {
+       struct cinergyt2_fe_state *state = fe->demodulator_priv;
+-      struct dvbt_get_status_msg result;
+-      u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
++      struct dvbt_get_status_msg *result;
++      u8 *cmd;
+       int ret;
+-      ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
+-                      sizeof(result), 0);
++      cmd = kmalloc(1, GFP_KERNEL);
++      if (cmd == NULL)
++              return -ENOMEM;
++      result = kmalloc(sizeof(*result), GFP_KERNEL);
++      if (result == NULL) {
++              kfree(cmd);
++              return -ENOMEM;
++      }
++
++      cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
++
++      ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
++                      sizeof(*result), 0);
+       if (ret < 0)
+-              return ret;
++              goto out;
+       *status = 0;
+-      if (0xffff - le16_to_cpu(result.gain) > 30)
++      if (0xffff - le16_to_cpu(result->gain) > 30)
+               *status |= FE_HAS_SIGNAL;
+-      if (result.lock_bits & (1 << 6))
++      if (result->lock_bits & (1 << 6))
+               *status |= FE_HAS_LOCK;
+-      if (result.lock_bits & (1 << 5))
++      if (result->lock_bits & (1 << 5))
+               *status |= FE_HAS_SYNC;
+-      if (result.lock_bits & (1 << 4))
++      if (result->lock_bits & (1 << 4))
+               *status |= FE_HAS_CARRIER;
+-      if (result.lock_bits & (1 << 1))
++      if (result->lock_bits & (1 << 1))
+               *status |= FE_HAS_VITERBI;
+       if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
+                       (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
+               *status &= ~FE_HAS_LOCK;
+-      return 0;
++out:
++      kfree(cmd);
++      kfree(result);
++      return ret;
+ }
+ static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
+ {
+       struct cinergyt2_fe_state *state = fe->demodulator_priv;
+-      struct dvbt_get_status_msg status;
+-      char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
++      struct dvbt_get_status_msg *status;
++      char *cmd;
+       int ret;
+-      ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
+-                              sizeof(status), 0);
++      cmd = kmalloc(1, GFP_KERNEL);
++      if (cmd == NULL)
++              return -ENOMEM;
++      status = kmalloc(sizeof(*status), GFP_KERNEL);
++      if (status == NULL) {
++              kfree(cmd);
++              return -ENOMEM;
++      }
++
++      cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
++
++      ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
++                              sizeof(*status), 0);
+       if (ret < 0)
+-              return ret;
++              goto out;
+-      *ber = le32_to_cpu(status.viterbi_error_rate);
++      *ber = le32_to_cpu(status->viterbi_error_rate);
++out:
++      kfree(cmd);
++      kfree(status);
+       return 0;
+ }
+ static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
+ {
+       struct cinergyt2_fe_state *state = fe->demodulator_priv;
+-      struct dvbt_get_status_msg status;
+-      u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
++      struct dvbt_get_status_msg *status;
++      u8 *cmd;
+       int ret;
+-      ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
+-                              sizeof(status), 0);
++      cmd = kmalloc(1, GFP_KERNEL);
++      if (cmd == NULL)
++              return -ENOMEM;
++      status = kmalloc(sizeof(*status), GFP_KERNEL);
++      if (status == NULL) {
++              kfree(cmd);
++              return -ENOMEM;
++      }
++
++      cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
++
++      ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
++                              sizeof(*status), 0);
+       if (ret < 0) {
+               err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
+                       ret);
+-              return ret;
++              goto out;
+       }
+-      *unc = le32_to_cpu(status.uncorrected_block_count);
+-      return 0;
++      *unc = le32_to_cpu(status->uncorrected_block_count);
++
++out:
++      kfree(cmd);
++      kfree(status);
++      return ret;
+ }
+ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
+                                               u16 *strength)
+ {
+       struct cinergyt2_fe_state *state = fe->demodulator_priv;
+-      struct dvbt_get_status_msg status;
+-      char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
++      struct dvbt_get_status_msg *status;
++      char *cmd;
+       int ret;
+-      ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
+-                              sizeof(status), 0);
++      cmd = kmalloc(1, GFP_KERNEL);
++      if (cmd == NULL)
++              return -ENOMEM;
++      status = kmalloc(sizeof(*status), GFP_KERNEL);
++      if (status == NULL) {
++              kfree(cmd);
++              return -ENOMEM;
++      }
++
++      cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
++
++      ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
++                              sizeof(*status), 0);
+       if (ret < 0) {
+               err("cinergyt2_fe_read_signal_strength() Failed!"
+                       " (Error=%d)\n", ret);
+-              return ret;
++              goto out;
+       }
+-      *strength = (0xffff - le16_to_cpu(status.gain));
++      *strength = (0xffff - le16_to_cpu(status->gain));
++
++out:
++      kfree(cmd);
++      kfree(status);
+       return 0;
+ }
+ static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
+ {
+       struct cinergyt2_fe_state *state = fe->demodulator_priv;
+-      struct dvbt_get_status_msg status;
+-      char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
++      struct dvbt_get_status_msg *status;
++      char *cmd;
+       int ret;
+-      ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
+-                              sizeof(status), 0);
++      cmd = kmalloc(1, GFP_KERNEL);
++      if (cmd == NULL)
++              return -ENOMEM;
++      status = kmalloc(sizeof(*status), GFP_KERNEL);
++      if (status == NULL) {
++              kfree(cmd);
++              return -ENOMEM;
++      }
++
++      cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
++
++      ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
++                              sizeof(*status), 0);
+       if (ret < 0) {
+               err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
+-              return ret;
++              goto out;
+       }
+-      *snr = (status.snr << 8) | status.snr;
+-      return 0;
++      *snr = (status->snr << 8) | status->snr;
++
++out:
++      kfree(cmd);
++      kfree(status);
++      return ret;
+ }
+ static int cinergyt2_fe_init(struct dvb_frontend *fe)
+@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
+ {
+       struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
+       struct cinergyt2_fe_state *state = fe->demodulator_priv;
+-      struct dvbt_set_parameters_msg param;
+-      char result[2];
++      struct dvbt_set_parameters_msg *param;
++      char *result;
+       int err;
+-      param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+-      param.tps = cpu_to_le16(compute_tps(fep));
+-      param.freq = cpu_to_le32(fep->frequency / 1000);
+-      param.flags = 0;
++      result = kmalloc(2, GFP_KERNEL);
++      if (result == NULL)
++              return -ENOMEM;
++      param = kmalloc(sizeof(*param), GFP_KERNEL);
++      if (param == NULL) {
++              kfree(result);
++              return -ENOMEM;
++      }
++
++      param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
++      param->tps = cpu_to_le16(compute_tps(fep));
++      param->freq = cpu_to_le32(fep->frequency / 1000);
++      param->flags = 0;
+       switch (fep->bandwidth_hz) {
+       default:
+       case 8000000:
+-              param.bandwidth = 8;
++              param->bandwidth = 8;
+               break;
+       case 7000000:
+-              param.bandwidth = 7;
++              param->bandwidth = 7;
+               break;
+       case 6000000:
+-              param.bandwidth = 6;
++              param->bandwidth = 6;
+               break;
+       }
+       err = dvb_usb_generic_rw(state->d,
+-                      (char *)&param, sizeof(param),
+-                      result, sizeof(result), 0);
++                      (char *)param, sizeof(*param),
++                      result, 2, 0);
+       if (err < 0)
+               err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
+-      return (err < 0) ? err : 0;
++      kfree(result);
++      kfree(param);
++      return err;
+ }
+ static void cinergyt2_fe_release(struct dvb_frontend *fe)
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+index dd048a7..717a7b2 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
+ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
+ {
+-      struct hexline hx;
+-      u8 reset;
++      struct hexline *hx;
++      u8 *reset;
+       int ret,pos=0;
++      reset = kmalloc(1, GFP_KERNEL);
++      if (reset == NULL)
++              return -ENOMEM;
++
++      hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
++      if (hx == NULL) {
++              kfree(reset);
++              return -ENOMEM;
++      }
++
+       /* stop the CPU */
+-      reset = 1;
+-      if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
++      reset[0] = 1;
++      if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
+               err("could not stop the USB controller CPU.");
+-      while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
+-              deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
+-              ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
++      while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
++              deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
++              ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
+-              if (ret != hx.len) {
++              if (ret != hx->len) {
+                       err("error while transferring firmware "
+                               "(transferred size: %d, block size: %d)",
+-                              ret,hx.len);
++                              ret,hx->len);
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+       if (ret < 0) {
+               err("firmware download failed at %d with %d",pos,ret);
++              kfree(reset);
++              kfree(hx);
+               return ret;
+       }
+       if (ret == 0) {
+               /* restart the CPU */
+-              reset = 0;
+-              if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
++              reset[0] = 0;
++              if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
+                       err("could not restart the USB controller CPU.");
+                       ret = -EINVAL;
+               }
+       } else
+               ret = -EIO;
++      kfree(reset);
++      kfree(hx);
++
+       return ret;
+ }
+ EXPORT_SYMBOL(usb_cypress_load_firmware);
+diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
+index d9f3262..4370dbd 100644
+--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
+@@ -89,8 +89,11 @@ struct technisat_usb2_state {
+ static int technisat_usb2_i2c_access(struct usb_device *udev,
+               u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
+ {
+-      u8 b[64];
+-      int ret, actual_length;
++      u8 *b = kmalloc(64, GFP_KERNEL);
++      int ret, actual_length, error = 0;
++
++      if (b == NULL)
++              return -ENOMEM;
+       deb_i2c("i2c-access: %02x, tx: ", device_addr);
+       debug_dump(tx, txlen, deb_i2c);
+@@ -123,7 +126,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
+       if (ret < 0) {
+               err("i2c-error: out failed %02x = %d", device_addr, ret);
+-              return -ENODEV;
++              error = -ENODEV;
++              goto out;
+       }
+       ret = usb_bulk_msg(udev,
+@@ -131,7 +135,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
+                       b, 64, &actual_length, 1000);
+       if (ret < 0) {
+               err("i2c-error: in failed %02x = %d", device_addr, ret);
+-              return -ENODEV;
++              error = -ENODEV;
++              goto out;
+       }
+       if (b[0] != I2C_STATUS_OK) {
+@@ -139,8 +144,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
+               /* handle tuner-i2c-nak */
+               if (!(b[0] == I2C_STATUS_NAK &&
+                               device_addr == 0x60
+-                              /* && device_is_technisat_usb2 */))
+-                      return -ENODEV;
++                              /* && device_is_technisat_usb2 */)) {
++                      error = -ENODEV;
++                      goto out;
++              }
+       }
+       deb_i2c("status: %d, ", b[0]);
+@@ -154,7 +161,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
+       deb_i2c("\n");
+-      return 0;
++out:
++      kfree(b);
++      return error;
+ }
+ static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+index c45f307..7d79261 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+@@ -103,8 +103,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
+ }
+-static void pvr2_context_notify(struct pvr2_context *mp)
++static void pvr2_context_notify(void *_mp)
+ {
++      struct pvr2_context *mp = _mp;
++
+       pvr2_context_set_notify(mp,!0);
+ }
+@@ -119,9 +121,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
+               pvr2_trace(PVR2_TRACE_CTXT,
+                          "pvr2_context %p (initialize)", mp);
+               /* Finish hardware initialization */
+-              if (pvr2_hdw_initialize(mp->hdw,
+-                                      (void (*)(void *))pvr2_context_notify,
+-                                      mp)) {
++              if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
+                       mp->video_stream.stream =
+                               pvr2_hdw_get_video_stream(mp->hdw);
+                       /* Trigger interface initialization.  By doing this
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+index 8c95793..2309b9e 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+@@ -101,8 +101,10 @@ static int pvr2_dvb_feed_thread(void *data)
+       return stat;
+ }
+-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
++static void pvr2_dvb_notify(void *_adap)
+ {
++      struct pvr2_dvb_adapter *adap = _adap;
++
+       wake_up(&adap->buffer_wait_data);
+ }
+@@ -161,8 +163,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
+               if (!(adap->buffer_storage[idx])) return -ENOMEM;
+       }
+-      pvr2_stream_set_callback(pvr->video_stream.stream,
+-                               (pvr2_stream_callback) pvr2_dvb_notify, adap);
++      pvr2_stream_set_callback(pvr->video_stream.stream, pvr2_dvb_notify, adap);
+       ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
+       if (ret < 0) return ret;
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+index fe20fe4..a199a6d 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -2097,7 +2097,7 @@ static void pvr2_hdw_load_modules(struct pvr2_hdw *hdw)
+       cm = &hdw->hdw_desc->client_modules;
+       for (idx = 0; idx < cm->cnt; idx++) {
+-              request_module(cm->lst[idx]);
++              request_module("%s", cm->lst[idx]);
+       }
+       ct = &hdw->hdw_desc->client_table;
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
+index 9a596a3..38de071 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
+@@ -216,7 +216,7 @@ unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
+                       bufSize -= c2;
+                       bufPtr += c2;
+                       c2 = scnprintf(bufPtr,bufSize,
+-                                     ip->name);
++                                     "%s", ip->name);
+                       c1 += c2;
+                       bufSize -= c2;
+                       bufPtr += c2;
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+index 81f788b..9619f47 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+@@ -1069,8 +1069,10 @@ static int pvr2_v4l2_open(struct file *file)
+ }
+-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
++static void pvr2_v4l2_notify(void *_fhp)
+ {
++      struct pvr2_v4l2_fh *fhp = _fhp;
++
+       wake_up(&fhp->wait_data);
+ }
+@@ -1103,7 +1105,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
+       hdw = fh->channel.mc_head->hdw;
+       sp = fh->pdi->stream->stream;
+-      pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
++      pvr2_stream_set_callback(sp,pvr2_v4l2_notify,fh);
+       pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
+       if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
+       return pvr2_ioread_set_enabled(fh->rhp,!0);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 302e284..93781d6 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2078,7 +2078,7 @@ static int uvc_reset_resume(struct usb_interface *intf)
+  * Module parameters
+  */
+-static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
++static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp)
+ {
+       if (uvc_clock_param == CLOCK_MONOTONIC)
+               return sprintf(buffer, "CLOCK_MONOTONIC");
+@@ -2086,7 +2086,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
+               return sprintf(buffer, "CLOCK_REALTIME");
+ }
+-static int uvc_clock_param_set(const char *val, struct kernel_param *kp)
++static int uvc_clock_param_set(const char *val, const struct kernel_param *kp)
+ {
+       if (strncasecmp(val, "clock_", strlen("clock_")) == 0)
+               val += strlen("clock_");
+diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
+index 5b80850..97b8443 100644
+--- a/drivers/media/v4l2-core/v4l2-common.c
++++ b/drivers/media/v4l2-core/v4l2-common.c
+@@ -268,7 +268,7 @@ struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+       BUG_ON(!v4l2_dev);
+       if (info->modalias[0])
+-              request_module(info->modalias);
++              request_module("%s", info->modalias);
+       spi = spi_new_device(master, info);
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index bacecbd..277d1f8 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -449,7 +449,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+                * by passing a very big num_planes value */
+               uplane = compat_alloc_user_space(num_planes *
+                                               sizeof(struct v4l2_plane));
+-              kp->m.planes = (__force struct v4l2_plane *)uplane;
++              kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
+               while (--num_planes >= 0) {
+                       ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+@@ -519,7 +519,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+               if (num_planes == 0)
+                       return 0;
+-              uplane = (__force struct v4l2_plane __user *)kp->m.planes;
++              uplane = (struct v4l2_plane __force_user *)kp->m.planes;
+               if (get_user(p, &up->m.planes))
+                       return -EFAULT;
+               uplane32 = compat_ptr(p);
+@@ -581,7 +581,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
+               get_user(kp->flags, &up->flags) ||
+               copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
+                       return -EFAULT;
+-      kp->base = (__force void *)compat_ptr(tmp);
++      kp->base = (__force_kernel void *)compat_ptr(tmp);
+       return 0;
+ }
+@@ -687,7 +687,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
+                       n * sizeof(struct v4l2_ext_control32)))
+               return -EFAULT;
+       kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
+-      kp->controls = (__force struct v4l2_ext_control *)kcontrols;
++      kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
+       while (--n >= 0) {
+               u32 id;
+@@ -714,7 +714,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
+ {
+       struct v4l2_ext_control32 __user *ucontrols;
+       struct v4l2_ext_control __user *kcontrols =
+-              (__force struct v4l2_ext_control __user *)kp->controls;
++              (struct v4l2_ext_control __force_user *)kp->controls;
+       int n = kp->count;
+       compat_caddr_t p;
+@@ -799,7 +799,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+               get_user(tmp, &up->edid) ||
+               copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+                       return -EFAULT;
+-      kp->edid = (__force u8 *)compat_ptr(tmp);
++      kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
+       return 0;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
+index 06fa5f1..2231dda 100644
+--- a/drivers/media/v4l2-core/v4l2-device.c
++++ b/drivers/media/v4l2-core/v4l2-device.c
+@@ -74,9 +74,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
+ EXPORT_SYMBOL_GPL(v4l2_device_put);
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+-                                              atomic_t *instance)
++                                              atomic_unchecked_t *instance)
+ {
+-      int num = atomic_inc_return(instance) - 1;
++      int num = atomic_inc_return_unchecked(instance) - 1;
+       int len = strlen(basename);
+       if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 51a0fa1..5ae0546 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -2425,49 +2425,216 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
+       return -ENOTTY;
+ }
++static int v4l_vidioc_g_fbuf(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_fbuf(file, fh, arg);
++}
++
++static int v4l_vidioc_s_fbuf(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_fbuf(file, fh, arg);
++}
++
++static int v4l_vidioc_expbuf(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_expbuf(file, fh, arg);
++}
++
++static int v4l_vidioc_g_std(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_std(file, fh, arg);
++}
++
++static int v4l_vidioc_g_audio(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_audio(file, fh, arg);
++}
++
++static int v4l_vidioc_s_audio(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_audio(file, fh, arg);
++}
++
++static int v4l_vidioc_g_input(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_input(file, fh, arg);
++}
++
++static int v4l_vidioc_g_edid(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_edid(file, fh, arg);
++}
++
++static int v4l_vidioc_s_edid(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_edid(file, fh, arg);
++}
++
++static int v4l_vidioc_g_output(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_output(file, fh, arg);
++}
++
++static int v4l_vidioc_g_audout(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_audout(file, fh, arg);
++}
++
++static int v4l_vidioc_s_audout(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_audout(file, fh, arg);
++}
++
++static int v4l_vidioc_g_selection(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_selection(file, fh, arg);
++}
++
++static int v4l_vidioc_s_selection(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_selection(file, fh, arg);
++}
++
++static int v4l_vidioc_g_jpegcomp(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_jpegcomp(file, fh, arg);
++}
++
++static int v4l_vidioc_s_jpegcomp(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_jpegcomp(file, fh, arg);
++}
++
++static int v4l_vidioc_enumaudio(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_enumaudio(file, fh, arg);
++}
++
++static int v4l_vidioc_enumaudout(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_enumaudout(file, fh, arg);
++}
++
++static int v4l_vidioc_enum_framesizes(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_enum_framesizes(file, fh, arg);
++}
++
++static int v4l_vidioc_enum_frameintervals(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_enum_frameintervals(file, fh, arg);
++}
++
++static int v4l_vidioc_g_enc_index(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_enc_index(file, fh, arg);
++}
++
++static int v4l_vidioc_encoder_cmd(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_encoder_cmd(file, fh, arg);
++}
++
++static int v4l_vidioc_try_encoder_cmd(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_try_encoder_cmd(file, fh, arg);
++}
++
++static int v4l_vidioc_decoder_cmd(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_decoder_cmd(file, fh, arg);
++}
++
++static int v4l_vidioc_try_decoder_cmd(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_try_decoder_cmd(file, fh, arg);
++}
++
++static int v4l_vidioc_s_dv_timings(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_s_dv_timings(file, fh, arg);
++}
++
++static int v4l_vidioc_g_dv_timings(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_g_dv_timings(file, fh, arg);
++}
++
++static int v4l_vidioc_enum_dv_timings(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_enum_dv_timings(file, fh, arg);
++}
++
++static int v4l_vidioc_query_dv_timings(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_query_dv_timings(file, fh, arg);
++}
++
++static int v4l_vidioc_dv_timings_cap(const struct v4l2_ioctl_ops *ops,
++                              struct file *file, void *fh, void *arg)
++{
++      return ops->vidioc_dv_timings_cap(file, fh, arg);
++}
++
+ struct v4l2_ioctl_info {
+       unsigned int ioctl;
+       u32 flags;
+       const char * const name;
+-      union {
+-              u32 offset;
+-              int (*func)(const struct v4l2_ioctl_ops *ops,
+-                              struct file *file, void *fh, void *p);
+-      } u;
++      int (*func)(const struct v4l2_ioctl_ops *ops,
++                      struct file *file, void *fh, void *p);
+       void (*debug)(const void *arg, bool write_only);
+-};
++} __do_const;
++typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
+ /* This control needs a priority check */
+ #define INFO_FL_PRIO  (1 << 0)
+ /* This control can be valid if the filehandle passes a control handler. */
+ #define INFO_FL_CTRL  (1 << 1)
+-/* This is a standard ioctl, no need for special code */
+-#define INFO_FL_STD   (1 << 2)
+ /* This is ioctl has its own function */
+-#define INFO_FL_FUNC  (1 << 3)
++#define INFO_FL_FUNC  (1 << 2)
+ /* Queuing ioctl */
+-#define INFO_FL_QUEUE (1 << 4)
++#define INFO_FL_QUEUE (1 << 3)
+ /* Zero struct from after the field to the end */
+ #define INFO_FL_CLEAR(v4l2_struct, field)                     \
+       ((offsetof(struct v4l2_struct, field) +                 \
+         sizeof(((struct v4l2_struct *)0)->field)) << 16)
+ #define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
+-#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags)                       \
+-      [_IOC_NR(_ioctl)] = {                                           \
+-              .ioctl = _ioctl,                                        \
+-              .flags = _flags | INFO_FL_STD,                          \
+-              .name = #_ioctl,                                        \
+-              .u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc),   \
+-              .debug = _debug,                                        \
+-      }
+-
+ #define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags)                 \
+       [_IOC_NR(_ioctl)] = {                                           \
+               .ioctl = _ioctl,                                        \
+               .flags = _flags | INFO_FL_FUNC,                         \
+               .name = #_ioctl,                                        \
+-              .u.func = _func,                                        \
++              .func = _func,                                          \
+               .debug = _debug,                                        \
+       }
+@@ -2478,17 +2645,17 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
+       IOCTL_INFO_FNC(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+       IOCTL_INFO_FNC(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
+-      IOCTL_INFO_STD(VIDIOC_G_FBUF, vidioc_g_fbuf, v4l_print_framebuffer, 0),
+-      IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
++      IOCTL_INFO_FNC(VIDIOC_G_FBUF, v4l_vidioc_g_fbuf, v4l_print_framebuffer, 0),
++      IOCTL_INFO_FNC(VIDIOC_S_FBUF, v4l_vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+-      IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
++      IOCTL_INFO_FNC(VIDIOC_EXPBUF, v4l_vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
+       IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
+       IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+       IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+       IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
+       IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_G_STD, vidioc_g_std, v4l_print_std, 0),
++      IOCTL_INFO_FNC(VIDIOC_G_STD, v4l_vidioc_g_std, v4l_print_std, 0),
+       IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
+       IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
+@@ -2496,19 +2663,19 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
+       IOCTL_INFO_FNC(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
+       IOCTL_INFO_FNC(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
+       IOCTL_INFO_FNC(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_G_AUDIO, vidioc_g_audio, v4l_print_audio, 0),
+-      IOCTL_INFO_STD(VIDIOC_S_AUDIO, vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO),
++      IOCTL_INFO_FNC(VIDIOC_G_AUDIO, v4l_vidioc_g_audio, v4l_print_audio, 0),
++      IOCTL_INFO_FNC(VIDIOC_S_AUDIO, v4l_vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
+       IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
+-      IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
++      IOCTL_INFO_FNC(VIDIOC_G_INPUT, v4l_vidioc_g_input, v4l_print_u32, 0),
+       IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, 0),
+-      IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
++      IOCTL_INFO_FNC(VIDIOC_G_EDID, v4l_vidioc_g_edid, v4l_print_edid, 0),
++      IOCTL_INFO_FNC(VIDIOC_S_EDID, v4l_vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO),
++      IOCTL_INFO_FNC(VIDIOC_G_OUTPUT, v4l_vidioc_g_output, v4l_print_u32, 0),
+       IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
+-      IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0),
+-      IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
++      IOCTL_INFO_FNC(VIDIOC_G_AUDOUT, v4l_vidioc_g_audout, v4l_print_audioout, 0),
++      IOCTL_INFO_FNC(VIDIOC_S_AUDOUT, v4l_vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
+       IOCTL_INFO_FNC(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
+@@ -2516,14 +2683,14 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
+       IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
+       IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
+       IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
+-      IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
+-      IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
+-      IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
++      IOCTL_INFO_FNC(VIDIOC_G_SELECTION, v4l_vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
++      IOCTL_INFO_FNC(VIDIOC_S_SELECTION, v4l_vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
++      IOCTL_INFO_FNC(VIDIOC_G_JPEGCOMP, v4l_vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
++      IOCTL_INFO_FNC(VIDIOC_S_JPEGCOMP, v4l_vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
+       IOCTL_INFO_FNC(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
+-      IOCTL_INFO_STD(VIDIOC_ENUMAUDIO, vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
+-      IOCTL_INFO_STD(VIDIOC_ENUMAUDOUT, vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
++      IOCTL_INFO_FNC(VIDIOC_ENUMAUDIO, v4l_vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
++      IOCTL_INFO_FNC(VIDIOC_ENUMAUDOUT, v4l_vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
+       IOCTL_INFO_FNC(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
+       IOCTL_INFO_FNC(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
+       IOCTL_INFO_FNC(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
+@@ -2531,26 +2698,26 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
+       IOCTL_INFO_FNC(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+       IOCTL_INFO_FNC(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
+       IOCTL_INFO_FNC(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+-      IOCTL_INFO_STD(VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
+-      IOCTL_INFO_STD(VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
+-      IOCTL_INFO_STD(VIDIOC_G_ENC_INDEX, vidioc_g_enc_index, v4l_print_enc_idx, 0),
+-      IOCTL_INFO_STD(VIDIOC_ENCODER_CMD, vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+-      IOCTL_INFO_STD(VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+-      IOCTL_INFO_STD(VIDIOC_DECODER_CMD, vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
++      IOCTL_INFO_FNC(VIDIOC_ENUM_FRAMESIZES, v4l_vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
++      IOCTL_INFO_FNC(VIDIOC_ENUM_FRAMEINTERVALS, v4l_vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
++      IOCTL_INFO_FNC(VIDIOC_G_ENC_INDEX, v4l_vidioc_g_enc_index, v4l_print_enc_idx, 0),
++      IOCTL_INFO_FNC(VIDIOC_ENCODER_CMD, v4l_vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
++      IOCTL_INFO_FNC(VIDIOC_TRY_ENCODER_CMD, v4l_vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
++      IOCTL_INFO_FNC(VIDIOC_DECODER_CMD, v4l_vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
++      IOCTL_INFO_FNC(VIDIOC_TRY_DECODER_CMD, v4l_vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
+       IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
+       IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
+       IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
+-      IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)),
+-      IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
++      IOCTL_INFO_FNC(VIDIOC_S_DV_TIMINGS, v4l_vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)),
++      IOCTL_INFO_FNC(VIDIOC_G_DV_TIMINGS, v4l_vidioc_g_dv_timings, v4l_print_dv_timings, 0),
+       IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
+       IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
+       IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
+       IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+       IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
+-      IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
+-      IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
+-      IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
++      IOCTL_INFO_FNC(VIDIOC_ENUM_DV_TIMINGS, v4l_vidioc_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
++      IOCTL_INFO_FNC(VIDIOC_QUERY_DV_TIMINGS, v4l_vidioc_query_dv_timings, v4l_print_dv_timings, 0),
++      IOCTL_INFO_FNC(VIDIOC_DV_TIMINGS_CAP, v4l_vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
+       IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
+       IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
+       IOCTL_INFO_FNC(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)),
+@@ -2619,7 +2786,7 @@ static long __video_do_ioctl(struct file *file,
+       struct video_device *vfd = video_devdata(file);
+       const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+       bool write_only = false;
+-      struct v4l2_ioctl_info default_info;
++      v4l2_ioctl_info_no_const default_info;
+       const struct v4l2_ioctl_info *info;
+       void *fh = file->private_data;
+       struct v4l2_fh *vfh = NULL;
+@@ -2655,14 +2822,8 @@ static long __video_do_ioctl(struct file *file,
+       }
+       write_only = _IOC_DIR(cmd) == _IOC_WRITE;
+-      if (info->flags & INFO_FL_STD) {
+-              typedef int (*vidioc_op)(struct file *file, void *fh, void *p);
+-              const void *p = vfd->ioctl_ops;
+-              const vidioc_op *vidioc = p + info->u.offset;
+-
+-              ret = (*vidioc)(file, fh, arg);
+-      } else if (info->flags & INFO_FL_FUNC) {
+-              ret = info->u.func(ops, file, fh, arg);
++      if (info->flags & INFO_FL_FUNC) {
++              ret = info->func(ops, file, fh, arg);
+       } else if (!ops->vidioc_default) {
+               ret = -ENOTTY;
+       } else {
+@@ -2710,7 +2871,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+                               ret = -EINVAL;
+                               break;
+                       }
+-                      *user_ptr = (void __user *)buf->m.planes;
++                      *user_ptr = (void __force_user *)buf->m.planes;
+                       *kernel_ptr = (void **)&buf->m.planes;
+                       *array_size = sizeof(struct v4l2_plane) * buf->length;
+                       ret = 1;
+@@ -2727,7 +2888,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+                               ret = -EINVAL;
+                               break;
+                       }
+-                      *user_ptr = (void __user *)edid->edid;
++                      *user_ptr = (void __force_user *)edid->edid;
+                       *kernel_ptr = (void **)&edid->edid;
+                       *array_size = edid->blocks * 128;
+                       ret = 1;
+@@ -2745,7 +2906,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+                               ret = -EINVAL;
+                               break;
+                       }
+-                      *user_ptr = (void __user *)ctrls->controls;
++                      *user_ptr = (void __force_user *)ctrls->controls;
+                       *kernel_ptr = (void **)&ctrls->controls;
+                       *array_size = sizeof(struct v4l2_ext_control)
+                                   * ctrls->count;
+@@ -2846,7 +3007,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+       }
+       if (has_array_args) {
+-              *kernel_ptr = (void __force *)user_ptr;
++              *kernel_ptr = (void __force_kernel *)user_ptr;
+               if (copy_to_user(user_ptr, mbuf, array_size))
+                       err = -EFAULT;
+               goto out_array_args;
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index f00f3e7..9138f66 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -233,7 +233,7 @@ struct omap3_gpmc_regs {
+ struct gpmc_device {
+       struct device *dev;
+       int irq;
+-      struct irq_chip irq_chip;
++      struct irq_chip *irq_chip;
+       struct gpio_chip gpio_chip;
+       int nirqs;
+ };
+@@ -1254,10 +1254,10 @@ static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
+       irq_set_chip_data(virq, gpmc);
+       if (hw < GPMC_NR_NAND_IRQS) {
+               irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
+-              irq_set_chip_and_handler(virq, &gpmc->irq_chip,
++              irq_set_chip_and_handler(virq, gpmc->irq_chip,
+                                        handle_simple_irq);
+       } else {
+-              irq_set_chip_and_handler(virq, &gpmc->irq_chip,
++              irq_set_chip_and_handler(virq, gpmc->irq_chip,
+                                        handle_edge_irq);
+       }
+@@ -1303,6 +1303,16 @@ static irqreturn_t gpmc_handle_irq(int irq, void *data)
+       return IRQ_HANDLED;
+ }
++static struct irq_chip gpmc_irq_chip = {
++      .name = "gpmc",
++      .irq_enable = gpmc_irq_enable,
++      .irq_disable = gpmc_irq_disable,
++      .irq_ack = gpmc_irq_ack,
++      .irq_mask = gpmc_irq_mask,
++      .irq_unmask = gpmc_irq_unmask,
++      .irq_set_type = gpmc_irq_set_type,
++};
++
+ static int gpmc_setup_irq(struct gpmc_device *gpmc)
+ {
+       u32 regval;
+@@ -1315,13 +1325,7 @@ static int gpmc_setup_irq(struct gpmc_device *gpmc)
+       regval = gpmc_read_reg(GPMC_IRQSTATUS);
+       gpmc_write_reg(GPMC_IRQSTATUS, regval);
+-      gpmc->irq_chip.name = "gpmc";
+-      gpmc->irq_chip.irq_enable = gpmc_irq_enable;
+-      gpmc->irq_chip.irq_disable = gpmc_irq_disable;
+-      gpmc->irq_chip.irq_ack = gpmc_irq_ack;
+-      gpmc->irq_chip.irq_mask = gpmc_irq_mask;
+-      gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
+-      gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
++      gpmc->irq_chip = &gpmc_irq_chip;
+       gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
+                                               gpmc->nirqs,
+diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
+index 5537f8d..f990a1d 100644
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -99,7 +99,7 @@ module_param(mpt_channel_mapping, int, 0);
+ MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
+ static int mpt_debug_level;
+-static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
++static int mpt_set_debug_level(const char *val, const struct kernel_param *kp);
+ module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
+                 &mpt_debug_level, 0600);
+ MODULE_PARM_DESC(mpt_debug_level,
+@@ -242,7 +242,7 @@ pci_enable_io_access(struct pci_dev *pdev)
+       pci_write_config_word(pdev, PCI_COMMAND, command_reg);
+ }
+-static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
++static int mpt_set_debug_level(const char *val, const struct kernel_param *kp)
+ {
+       int ret = param_set_int(val, kp);
+       MPT_ADAPTER *ioc;
+@@ -6748,8 +6748,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+       seq_printf(m, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+       seq_printf(m, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
++#else
+       seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+                                       (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
++#endif
++
+       /*
+        *  Rounding UP to nearest 4-kB boundary here...
+        */
+@@ -6762,7 +6767,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+                                       ioc->facts.GlobalCredits);
+       seq_printf(m, "  Frames   @ 0x%p (Dma @ 0x%p)\n",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                                      NULL, NULL);
++#else
+                                       (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
++#endif
+       sz = (ioc->reply_sz * ioc->reply_depth) + 128;
+       seq_printf(m, "    {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
+                                       ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
+diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
+index 6955c9e..03bc466 100644
+--- a/drivers/message/fusion/mptlan.c
++++ b/drivers/message/fusion/mptlan.c
+@@ -680,7 +680,7 @@ out:
+ }
+ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+-static int
++static netdev_tx_t
+ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
+ {
+       struct mpt_lan_priv *priv = netdev_priv(dev);
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index 7ee1667..c36740d 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
+               return 0;
+ }
++static inline void
++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
++{
++      if (phy_info->port_details) {
++              phy_info->port_details->rphy = rphy;
++              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
++                  ioc->name, rphy));
++      }
++
++      if (rphy) {
++              dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
++                  &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
++              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
++                  ioc->name, rphy, rphy->dev.release));
++      }
++}
++
+ /* no mutex */
+ static void
+ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
+@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
+               return NULL;
+ }
+-static inline void
+-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+-{
+-      if (phy_info->port_details) {
+-              phy_info->port_details->rphy = rphy;
+-              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+-                  ioc->name, rphy));
+-      }
+-
+-      if (rphy) {
+-              dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+-                  &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+-              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+-                  ioc->name, rphy, rphy->dev.release));
+-      }
+-}
+-
+ static inline struct sas_port *
+ mptsas_get_port(struct mptsas_phyinfo *phy_info)
+ {
+diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
+index 0aecd7b..41bf9bf 100644
+--- a/drivers/mfd/ab8500-debugfs.c
++++ b/drivers/mfd/ab8500-debugfs.c
+@@ -100,7 +100,7 @@ static int irq_last;
+ static u32 *irq_count;
+ static int num_irqs;
+-static struct device_attribute **dev_attr;
++static device_attribute_no_const **dev_attr;
+ static char **event_name;
+ static u8 avg_sample = SAMPLE_16;
+diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
+index da5722d..d405030 100644
+--- a/drivers/mfd/kempld-core.c
++++ b/drivers/mfd/kempld-core.c
+@@ -494,7 +494,7 @@ static struct platform_driver kempld_driver = {
+       .remove         = kempld_remove,
+ };
+-static struct dmi_system_id kempld_dmi_table[] __initdata = {
++static const struct dmi_system_id kempld_dmi_table[] __initconst = {
+       {
+               .ident = "BBL6",
+               .matches = {
+diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
+index 5c80aea..1006323 100644
+--- a/drivers/mfd/max8925-i2c.c
++++ b/drivers/mfd/max8925-i2c.c
+@@ -151,7 +151,7 @@ static int max8925_probe(struct i2c_client *client,
+                                  const struct i2c_device_id *id)
+ {
+       struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
+-      static struct max8925_chip *chip;
++      struct max8925_chip *chip;
+       struct device_node *node = client->dev.of_node;
+       if (node && !pdata) {
+diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
+index ee94080..e2a4a3d 100644
+--- a/drivers/mfd/rn5t618.c
++++ b/drivers/mfd/rn5t618.c
+@@ -52,7 +52,6 @@ static const struct regmap_config rn5t618_regmap_config = {
+ };
+ static struct rn5t618 *rn5t618_pm_power_off;
+-static struct notifier_block rn5t618_restart_handler;
+ static void rn5t618_trigger_poweroff_sequence(bool repower)
+ {
+@@ -84,6 +83,12 @@ static int rn5t618_restart(struct notifier_block *this,
+       return NOTIFY_DONE;
+ }
++static struct notifier_block rn5t618_restart_handler = {
++      .notifier_call = rn5t618_restart,
++      .priority = 192,
++
++};
++
+ static const struct of_device_id rn5t618_of_match[] = {
+       { .compatible = "ricoh,rn5t567", .data = (void *)RN5T567 },
+       { .compatible = "ricoh,rn5t618", .data = (void *)RN5T618 },
+@@ -133,9 +138,6 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
+                       dev_warn(&i2c->dev, "Poweroff callback already assigned\n");
+       }
+-      rn5t618_restart_handler.notifier_call = rn5t618_restart;
+-      rn5t618_restart_handler.priority = 192;
+-
+       ret = register_restart_handler(&rn5t618_restart_handler);
+       if (ret) {
+               dev_err(&i2c->dev, "cannot register restart handler, %d\n", ret);
+diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
+index 11cab15..d144bd9 100644
+--- a/drivers/mfd/tps65910.c
++++ b/drivers/mfd/tps65910.c
+@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+                   struct tps65910_platform_data *pdata)
+ {
+       int ret = 0;
+-      static struct regmap_irq_chip *tps6591x_irqs_chip;
++      struct regmap_irq_chip *tps6591x_irqs_chip;
+       if (!irq) {
+               dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
+index b46c0cf..89e322b 100644
+--- a/drivers/mfd/twl4030-irq.c
++++ b/drivers/mfd/twl4030-irq.c
+@@ -34,6 +34,7 @@
+ #include <linux/of.h>
+ #include <linux/irqdomain.h>
+ #include <linux/i2c/twl.h>
++#include <asm/pgtable.h>
+ #include "twl-core.h"
+@@ -720,10 +721,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
+        * Install an irq handler for each of the SIH modules;
+        * clone dummy irq_chip since PIH can't *do* anything
+        */
+-      twl4030_irq_chip = dummy_irq_chip;
+-      twl4030_irq_chip.name = "twl4030";
++      pax_open_kernel();
++      memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
++      const_cast(twl4030_irq_chip.name) = "twl4030";
+-      twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
++      const_cast(twl4030_sih_irq_chip.irq_ack) = dummy_irq_chip.irq_ack;
++      pax_close_kernel();
+       for (i = irq_base; i < irq_end; i++) {
+               irq_set_chip_and_handler(i, &twl4030_irq_chip,
+diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
+index 1922cb8..e14fb42 100644
+--- a/drivers/misc/c2port/core.c
++++ b/drivers/misc/c2port/core.c
+@@ -918,7 +918,9 @@ struct c2port_device *c2port_device_register(char *name,
+               goto error_idr_alloc;
+       c2dev->id = ret;
+-      bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
++      pax_open_kernel();
++      const_cast(bin_attr_flash_data.size) = ops->blocks_num * ops->block_size;
++      pax_close_kernel();
+       c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
+                                  "c2port%d", c2dev->id);
+diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
+index 99635dd..255bd78 100644
+--- a/drivers/misc/kgdbts.c
++++ b/drivers/misc/kgdbts.c
+@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
+       char before[BREAK_INSTR_SIZE];
+       char after[BREAK_INSTR_SIZE];
+-      probe_kernel_read(before, (char *)kgdbts_break_test,
++      probe_kernel_read(before, (void *)ktla_ktva((unsigned long)kgdbts_break_test),
+         BREAK_INSTR_SIZE);
+       init_simple_test();
+       ts.tst = plant_and_detach_test;
+@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
+       /* Activate test with initial breakpoint */
+       if (!is_early)
+               kgdb_breakpoint();
+-      probe_kernel_read(after, (char *)kgdbts_break_test,
++      probe_kernel_read(after, (void *)ktla_ktva((unsigned long)kgdbts_break_test),
+         BREAK_INSTR_SIZE);
+       if (memcmp(before, after, BREAK_INSTR_SIZE)) {
+               printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
+@@ -1130,7 +1130,7 @@ static void kgdbts_put_char(u8 chr)
+               ts.run_test(0, chr);
+ }
+-static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
++static int param_set_kgdbts_var(const char *kmessage, const struct kernel_param *kp)
+ {
+       int len = strlen(kmessage);
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index fb8705f..dc2f679 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
+        * the lid is closed. This leads to interrupts as soon as a little move
+        * is done.
+        */
+-      atomic_inc(&lis3->count);
++      atomic_inc_unchecked(&lis3->count);
+       wake_up_interruptible(&lis3->misc_wait);
+       kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
+@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+       if (lis3->pm_dev)
+               pm_runtime_get_sync(lis3->pm_dev);
+-      atomic_set(&lis3->count, 0);
++      atomic_set_unchecked(&lis3->count, 0);
+       return 0;
+ }
+@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
+       add_wait_queue(&lis3->misc_wait, &wait);
+       while (true) {
+               set_current_state(TASK_INTERRUPTIBLE);
+-              data = atomic_xchg(&lis3->count, 0);
++              data = atomic_xchg_unchecked(&lis3->count, 0);
+               if (data)
+                       break;
+@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+                                             struct lis3lv02d, miscdev);
+       poll_wait(file, &lis3->misc_wait, wait);
+-      if (atomic_read(&lis3->count))
++      if (atomic_read_unchecked(&lis3->count))
+               return POLLIN | POLLRDNORM;
+       return 0;
+ }
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
+index c439c82..1f20f57 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.h
++++ b/drivers/misc/lis3lv02d/lis3lv02d.h
+@@ -297,7 +297,7 @@ struct lis3lv02d {
+       struct input_polled_dev *idev;     /* input device */
+       struct platform_device  *pdev;     /* platform device */
+       struct regulator_bulk_data regulators[2];
+-      atomic_t                count;     /* interrupt count after last read */
++      atomic_unchecked_t      count;     /* interrupt count after last read */
+       union axis_conversion   ac;        /* hw -> logical axis */
+       int                     mapped_btns[3];
+diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
+index ddc9e4b..9e27f41 100644
+--- a/drivers/misc/mic/scif/scif_api.c
++++ b/drivers/misc/mic/scif/scif_api.c
+@@ -1486,10 +1486,12 @@ int scif_client_register(struct scif_client *client)
+ {
+       struct subsys_interface *si = &client->si;
+-      si->name = client->name;
+-      si->subsys = &scif_peer_bus;
+-      si->add_dev = scif_add_client_dev;
+-      si->remove_dev = scif_remove_client_dev;
++      pax_open_kernel();
++      const_cast(si->name) = client->name;
++      const_cast(si->subsys) = &scif_peer_bus;
++      const_cast(si->add_dev) = scif_add_client_dev;
++      const_cast(si->remove_dev) = scif_remove_client_dev;
++      pax_close_kernel();
+       return subsys_interface_register(&client->si);
+ }
+diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
+index 637cc46..4fb1267 100644
+--- a/drivers/misc/mic/scif/scif_rb.c
++++ b/drivers/misc/mic/scif/scif_rb.c
+@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
+        * the read barrier in scif_rb_count(..)
+        */
+       wmb();
+-      ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
++      ACCESS_ONCE_RW(*rb->write_ptr) = rb->current_write_offset;
+ #ifdef CONFIG_INTEL_MIC_CARD
+       /*
+        * X100 Si bug: For the case where a Core is performing an EXT_WR
+@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
+        * This way, if ordering is violated for the Interrupt Message, it will
+        * fall just behind the first Posted associated with the first EXT_WR.
+        */
+-      ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
++      ACCESS_ONCE_RW(*rb->write_ptr) = rb->current_write_offset;
+ #endif
+ }
+@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
+        * scif_rb_space(..)
+        */
+       mb();
+-      ACCESS_ONCE(*rb->read_ptr) = new_offset;
++      ACCESS_ONCE_RW(*rb->read_ptr) = new_offset;
+ #ifdef CONFIG_INTEL_MIC_CARD
+       /*
+        * X100 Si Bug: For the case where a Core is performing an EXT_WR
+@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
+        * This way, if ordering is violated for the Interrupt Message, it will
+        * fall just behind the first Posted associated with the first EXT_WR.
+        */
+-      ACCESS_ONCE(*rb->read_ptr) = new_offset;
++      ACCESS_ONCE_RW(*rb->read_ptr) = new_offset;
+ #endif
+ }
+diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
+index 6030ac5..e498727 100644
+--- a/drivers/misc/panel.c
++++ b/drivers/misc/panel.c
+@@ -1983,7 +1983,7 @@ static void panel_process_inputs(void)
+       }
+ }
+-static void panel_scan_timer(void)
++static void panel_scan_timer(unsigned long data)
+ {
+       if (keypad.enabled && keypad_initialized) {
+               if (spin_trylock_irq(&pprt_lock)) {
+@@ -2019,7 +2019,7 @@ static void init_scan_timer(void)
+       if (scan_timer.function)
+               return;         /* already started */
+-      setup_timer(&scan_timer, (void *)&panel_scan_timer, 0);
++      setup_timer(&scan_timer, &panel_scan_timer, 0);
+       scan_timer.expires = jiffies + INPUT_POLL_TIME;
+       add_timer(&scan_timer);
+ }
+diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
+index 1ee8e82..785f528 100644
+--- a/drivers/misc/sgi-gru/gruhandles.c
++++ b/drivers/misc/sgi-gru/gruhandles.c
+@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
+       unsigned long nsec;
+       nsec = CLKS2NSEC(clks);
+-      atomic_long_inc(&mcs_op_statistics[op].count);
+-      atomic_long_add(nsec, &mcs_op_statistics[op].total);
++      atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
++      atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
+       if (mcs_op_statistics[op].max < nsec)
+               mcs_op_statistics[op].max = nsec;
+ }
+diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
+index 4f76359..cdfcb2e 100644
+--- a/drivers/misc/sgi-gru/gruprocfs.c
++++ b/drivers/misc/sgi-gru/gruprocfs.c
+@@ -32,9 +32,9 @@
+ #define printstat(s, f)               printstat_val(s, &gru_stats.f, #f)
+-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
+ {
+-      unsigned long val = atomic_long_read(v);
++      unsigned long val = atomic_long_read_unchecked(v);
+       seq_printf(s, "%16lu %s\n", val, id);
+ }
+@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
+       seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+       for (op = 0; op < mcsop_last; op++) {
+-              count = atomic_long_read(&mcs_op_statistics[op].count);
+-              total = atomic_long_read(&mcs_op_statistics[op].total);
++              count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
++              total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
+               max = mcs_op_statistics[op].max;
+               seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+                          count ? total / count : 0, max);
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
+index 5c3ce24..4915ccb 100644
+--- a/drivers/misc/sgi-gru/grutables.h
++++ b/drivers/misc/sgi-gru/grutables.h
+@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
+  * GRU statistics.
+  */
+ struct gru_stats_s {
+-      atomic_long_t vdata_alloc;
+-      atomic_long_t vdata_free;
+-      atomic_long_t gts_alloc;
+-      atomic_long_t gts_free;
+-      atomic_long_t gms_alloc;
+-      atomic_long_t gms_free;
+-      atomic_long_t gts_double_allocate;
+-      atomic_long_t assign_context;
+-      atomic_long_t assign_context_failed;
+-      atomic_long_t free_context;
+-      atomic_long_t load_user_context;
+-      atomic_long_t load_kernel_context;
+-      atomic_long_t lock_kernel_context;
+-      atomic_long_t unlock_kernel_context;
+-      atomic_long_t steal_user_context;
+-      atomic_long_t steal_kernel_context;
+-      atomic_long_t steal_context_failed;
+-      atomic_long_t nopfn;
+-      atomic_long_t asid_new;
+-      atomic_long_t asid_next;
+-      atomic_long_t asid_wrap;
+-      atomic_long_t asid_reuse;
+-      atomic_long_t intr;
+-      atomic_long_t intr_cbr;
+-      atomic_long_t intr_tfh;
+-      atomic_long_t intr_spurious;
+-      atomic_long_t intr_mm_lock_failed;
+-      atomic_long_t call_os;
+-      atomic_long_t call_os_wait_queue;
+-      atomic_long_t user_flush_tlb;
+-      atomic_long_t user_unload_context;
+-      atomic_long_t user_exception;
+-      atomic_long_t set_context_option;
+-      atomic_long_t check_context_retarget_intr;
+-      atomic_long_t check_context_unload;
+-      atomic_long_t tlb_dropin;
+-      atomic_long_t tlb_preload_page;
+-      atomic_long_t tlb_dropin_fail_no_asid;
+-      atomic_long_t tlb_dropin_fail_upm;
+-      atomic_long_t tlb_dropin_fail_invalid;
+-      atomic_long_t tlb_dropin_fail_range_active;
+-      atomic_long_t tlb_dropin_fail_idle;
+-      atomic_long_t tlb_dropin_fail_fmm;
+-      atomic_long_t tlb_dropin_fail_no_exception;
+-      atomic_long_t tfh_stale_on_fault;
+-      atomic_long_t mmu_invalidate_range;
+-      atomic_long_t mmu_invalidate_page;
+-      atomic_long_t flush_tlb;
+-      atomic_long_t flush_tlb_gru;
+-      atomic_long_t flush_tlb_gru_tgh;
+-      atomic_long_t flush_tlb_gru_zero_asid;
++      atomic_long_unchecked_t vdata_alloc;
++      atomic_long_unchecked_t vdata_free;
++      atomic_long_unchecked_t gts_alloc;
++      atomic_long_unchecked_t gts_free;
++      atomic_long_unchecked_t gms_alloc;
++      atomic_long_unchecked_t gms_free;
++      atomic_long_unchecked_t gts_double_allocate;
++      atomic_long_unchecked_t assign_context;
++      atomic_long_unchecked_t assign_context_failed;
++      atomic_long_unchecked_t free_context;
++      atomic_long_unchecked_t load_user_context;
++      atomic_long_unchecked_t load_kernel_context;
++      atomic_long_unchecked_t lock_kernel_context;
++      atomic_long_unchecked_t unlock_kernel_context;
++      atomic_long_unchecked_t steal_user_context;
++      atomic_long_unchecked_t steal_kernel_context;
++      atomic_long_unchecked_t steal_context_failed;
++      atomic_long_unchecked_t nopfn;
++      atomic_long_unchecked_t asid_new;
++      atomic_long_unchecked_t asid_next;
++      atomic_long_unchecked_t asid_wrap;
++      atomic_long_unchecked_t asid_reuse;
++      atomic_long_unchecked_t intr;
++      atomic_long_unchecked_t intr_cbr;
++      atomic_long_unchecked_t intr_tfh;
++      atomic_long_unchecked_t intr_spurious;
++      atomic_long_unchecked_t intr_mm_lock_failed;
++      atomic_long_unchecked_t call_os;
++      atomic_long_unchecked_t call_os_wait_queue;
++      atomic_long_unchecked_t user_flush_tlb;
++      atomic_long_unchecked_t user_unload_context;
++      atomic_long_unchecked_t user_exception;
++      atomic_long_unchecked_t set_context_option;
++      atomic_long_unchecked_t check_context_retarget_intr;
++      atomic_long_unchecked_t check_context_unload;
++      atomic_long_unchecked_t tlb_dropin;
++      atomic_long_unchecked_t tlb_preload_page;
++      atomic_long_unchecked_t tlb_dropin_fail_no_asid;
++      atomic_long_unchecked_t tlb_dropin_fail_upm;
++      atomic_long_unchecked_t tlb_dropin_fail_invalid;
++      atomic_long_unchecked_t tlb_dropin_fail_range_active;
++      atomic_long_unchecked_t tlb_dropin_fail_idle;
++      atomic_long_unchecked_t tlb_dropin_fail_fmm;
++      atomic_long_unchecked_t tlb_dropin_fail_no_exception;
++      atomic_long_unchecked_t tfh_stale_on_fault;
++      atomic_long_unchecked_t mmu_invalidate_range;
++      atomic_long_unchecked_t mmu_invalidate_page;
++      atomic_long_unchecked_t flush_tlb;
++      atomic_long_unchecked_t flush_tlb_gru;
++      atomic_long_unchecked_t flush_tlb_gru_tgh;
++      atomic_long_unchecked_t flush_tlb_gru_zero_asid;
+-      atomic_long_t copy_gpa;
+-      atomic_long_t read_gpa;
++      atomic_long_unchecked_t copy_gpa;
++      atomic_long_unchecked_t read_gpa;
+-      atomic_long_t mesq_receive;
+-      atomic_long_t mesq_receive_none;
+-      atomic_long_t mesq_send;
+-      atomic_long_t mesq_send_failed;
+-      atomic_long_t mesq_noop;
+-      atomic_long_t mesq_send_unexpected_error;
+-      atomic_long_t mesq_send_lb_overflow;
+-      atomic_long_t mesq_send_qlimit_reached;
+-      atomic_long_t mesq_send_amo_nacked;
+-      atomic_long_t mesq_send_put_nacked;
+-      atomic_long_t mesq_page_overflow;
+-      atomic_long_t mesq_qf_locked;
+-      atomic_long_t mesq_qf_noop_not_full;
+-      atomic_long_t mesq_qf_switch_head_failed;
+-      atomic_long_t mesq_qf_unexpected_error;
+-      atomic_long_t mesq_noop_unexpected_error;
+-      atomic_long_t mesq_noop_lb_overflow;
+-      atomic_long_t mesq_noop_qlimit_reached;
+-      atomic_long_t mesq_noop_amo_nacked;
+-      atomic_long_t mesq_noop_put_nacked;
+-      atomic_long_t mesq_noop_page_overflow;
++      atomic_long_unchecked_t mesq_receive;
++      atomic_long_unchecked_t mesq_receive_none;
++      atomic_long_unchecked_t mesq_send;
++      atomic_long_unchecked_t mesq_send_failed;
++      atomic_long_unchecked_t mesq_noop;
++      atomic_long_unchecked_t mesq_send_unexpected_error;
++      atomic_long_unchecked_t mesq_send_lb_overflow;
++      atomic_long_unchecked_t mesq_send_qlimit_reached;
++      atomic_long_unchecked_t mesq_send_amo_nacked;
++      atomic_long_unchecked_t mesq_send_put_nacked;
++      atomic_long_unchecked_t mesq_page_overflow;
++      atomic_long_unchecked_t mesq_qf_locked;
++      atomic_long_unchecked_t mesq_qf_noop_not_full;
++      atomic_long_unchecked_t mesq_qf_switch_head_failed;
++      atomic_long_unchecked_t mesq_qf_unexpected_error;
++      atomic_long_unchecked_t mesq_noop_unexpected_error;
++      atomic_long_unchecked_t mesq_noop_lb_overflow;
++      atomic_long_unchecked_t mesq_noop_qlimit_reached;
++      atomic_long_unchecked_t mesq_noop_amo_nacked;
++      atomic_long_unchecked_t mesq_noop_put_nacked;
++      atomic_long_unchecked_t mesq_noop_page_overflow;
+ };
+@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
+       tghop_invalidate, mcsop_last};
+ struct mcs_op_statistic {
+-      atomic_long_t   count;
+-      atomic_long_t   total;
++      atomic_long_unchecked_t count;
++      atomic_long_unchecked_t total;
+       unsigned long   max;
+ };
+@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
+ #define STAT(id)      do {                                            \
+                               if (gru_options & OPT_STATS)            \
+-                                      atomic_long_inc(&gru_stats.id); \
++                                      atomic_long_inc_unchecked(&gru_stats.id);       \
+                       } while (0)
+ #ifdef CONFIG_SGI_GRU_DEBUG
+diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
+index c862cd4..0d176fe 100644
+--- a/drivers/misc/sgi-xp/xp.h
++++ b/drivers/misc/sgi-xp/xp.h
+@@ -288,7 +288,7 @@ struct xpc_interface {
+                                       xpc_notify_func, void *);
+       void (*received) (short, int, void *);
+       enum xp_retval (*partid_to_nasids) (short, void *);
+-};
++} __no_const;
+ extern struct xpc_interface xpc_interface;
+diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
+index 01be66d..4a305b4 100644
+--- a/drivers/misc/sgi-xp/xp_main.c
++++ b/drivers/misc/sgi-xp/xp_main.c
+@@ -71,20 +71,42 @@ EXPORT_SYMBOL_GPL(xpc_registrations);
+ /*
+  * Initialize the XPC interface to indicate that XPC isn't loaded.
+  */
+-static enum xp_retval
+-xpc_notloaded(void)
++static void xpc_notloaded_connect(int ch_number)
++{
++}
++
++static void xpc_notloaded_disconnect(int ch_number)
++{
++}
++
++static enum xp_retval xpc_notloaded_send(short partid, int ch_number, u32 flags, void *payload,
++                                       u16 payload_size)
++{
++      return xpNotLoaded;
++}
++
++static enum xp_retval xpc_notloaded_send_notify(short partid, int ch_number, u32 flags, void *payload,
++                                              u16 payload_size, xpc_notify_func func, void *key)
++{
++      return xpNotLoaded;
++}
++
++static void xpc_notloaded_received(short partid, int ch_number, void *payload)
++{
++}
++
++static enum xp_retval xpc_notloaded_partid_to_nasids(short partid, void *nasid_mask)
+ {
+       return xpNotLoaded;
+ }
+ struct xpc_interface xpc_interface = {
+-      (void (*)(int))xpc_notloaded,
+-      (void (*)(int))xpc_notloaded,
+-      (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
+-      (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
+-                         void *))xpc_notloaded,
+-      (void (*)(short, int, void *))xpc_notloaded,
+-      (enum xp_retval(*)(short, void *))xpc_notloaded
++      .connect = xpc_notloaded_connect,
++      .disconnect = xpc_notloaded_disconnect,
++      .send = xpc_notloaded_send,
++      .send_notify = xpc_notloaded_send_notify,
++      .received = xpc_notloaded_received,
++      .partid_to_nasids = xpc_notloaded_partid_to_nasids
+ };
+ EXPORT_SYMBOL_GPL(xpc_interface);
+@@ -115,17 +137,12 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
+ void
+ xpc_clear_interface(void)
+ {
+-      xpc_interface.connect = (void (*)(int))xpc_notloaded;
+-      xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+-      xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
+-          xpc_notloaded;
+-      xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
+-                                                     u16, xpc_notify_func,
+-                                                     void *))xpc_notloaded;
+-      xpc_interface.received = (void (*)(short, int, void *))
+-          xpc_notloaded;
+-      xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
+-          xpc_notloaded;
++      xpc_interface.connect = xpc_notloaded_connect;
++      xpc_interface.disconnect = xpc_notloaded_disconnect;
++      xpc_interface.send = xpc_notloaded_send;
++      xpc_interface.send_notify = xpc_notloaded_send_notify;
++      xpc_interface.received = xpc_notloaded_received;
++      xpc_interface.partid_to_nasids = xpc_notloaded_partid_to_nasids;
+ }
+ EXPORT_SYMBOL_GPL(xpc_clear_interface);
+diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
+index b94d5f7..7f494c5 100644
+--- a/drivers/misc/sgi-xp/xpc.h
++++ b/drivers/misc/sgi-xp/xpc.h
+@@ -835,6 +835,7 @@ struct xpc_arch_operations {
+       void (*received_payload) (struct xpc_channel *, void *);
+       void (*notify_senders_of_disconnect) (struct xpc_channel *);
+ };
++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
+ /* struct xpc_partition act_state values (for XPC HB) */
+@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
+ /* found in xpc_main.c */
+ extern struct device *xpc_part;
+ extern struct device *xpc_chan;
+-extern struct xpc_arch_operations xpc_arch_ops;
++extern xpc_arch_operations_no_const xpc_arch_ops;
+ extern int xpc_disengage_timelimit;
+ extern int xpc_disengage_timedout;
+ extern int xpc_activate_IRQ_rcvd;
+diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
+index 7f32712..8539ab2 100644
+--- a/drivers/misc/sgi-xp/xpc_main.c
++++ b/drivers/misc/sgi-xp/xpc_main.c
+@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
+       .notifier_call = xpc_system_die,
+ };
+-struct xpc_arch_operations xpc_arch_ops;
++xpc_arch_operations_no_const xpc_arch_ops;
+ /*
+  * Timer function to enforce the timelimit on the partition disengage.
+diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
+index 557f978..c8ce9fb 100644
+--- a/drivers/misc/sgi-xp/xpnet.c
++++ b/drivers/misc/sgi-xp/xpnet.c
+@@ -421,7 +421,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
+  * destination partid.  If the destination partid octets are 0xffff,
+  * this packet is to be broadcast to all connected partitions.
+  */
+-static int
++static netdev_tx_t
+ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct xpnet_pending_msg *queued_msg;
+diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
+index bf0d770..9b331b0d 100644
+--- a/drivers/misc/ti-st/st_kim.c
++++ b/drivers/misc/ti-st/st_kim.c
+@@ -581,9 +581,10 @@ static int show_list(struct seq_file *s, void *unused)
+       return 0;
+ }
+-static ssize_t show_install(struct device *dev,
+-              struct device_attribute *attr, char *buf)
++static ssize_t show_install(struct kobject *_dev,
++              struct kobj_attribute *attr, char *buf)
+ {
++      struct device *dev = (struct device *)_dev;
+       struct kim_data_s *kim_data = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", kim_data->ldisc_install);
+ }
+@@ -610,47 +611,50 @@ static ssize_t store_baud_rate(struct device *dev,
+ }
+ #endif        /* if DEBUG */
+-static ssize_t show_dev_name(struct device *dev,
+-              struct device_attribute *attr, char *buf)
++static ssize_t show_dev_name(struct kobject *_dev,
++              struct kobj_attribute *attr, char *buf)
+ {
++      struct device *dev = (struct device *)_dev;
+       struct kim_data_s *kim_data = dev_get_drvdata(dev);
+       return sprintf(buf, "%s\n", kim_data->dev_name);
+ }
+-static ssize_t show_baud_rate(struct device *dev,
+-              struct device_attribute *attr, char *buf)
++static ssize_t show_baud_rate(struct kobject *_dev,
++              struct kobj_attribute *attr, char *buf)
+ {
++      struct device *dev = (struct device *)_dev;
+       struct kim_data_s *kim_data = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", kim_data->baud_rate);
+ }
+-static ssize_t show_flow_cntrl(struct device *dev,
+-              struct device_attribute *attr, char *buf)
++static ssize_t show_flow_cntrl(struct kobject *_dev,
++              struct kobj_attribute *attr, char *buf)
+ {
++      struct device *dev = (struct device *)_dev;
+       struct kim_data_s *kim_data = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", kim_data->flow_cntrl);
+ }
+ /* structures specific for sysfs entries */
+ static struct kobj_attribute ldisc_install =
+-__ATTR(install, 0444, (void *)show_install, NULL);
++__ATTR(install, 0444, show_install, NULL);
+ static struct kobj_attribute uart_dev_name =
+ #ifdef DEBUG  /* TODO: move this to debug-fs if possible */
+-__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
++__ATTR(dev_name, 0644, show_dev_name, store_dev_name);
+ #else
+-__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
++__ATTR(dev_name, 0444, show_dev_name, NULL);
+ #endif
+ static struct kobj_attribute uart_baud_rate =
+ #ifdef DEBUG  /* TODO: move to debugfs */
+-__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
++__ATTR(baud_rate, 0644, show_baud_rate, store_baud_rate);
+ #else
+-__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
++__ATTR(baud_rate, 0444, show_baud_rate, NULL);
+ #endif
+ static struct kobj_attribute uart_flow_cntrl =
+-__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
++__ATTR(flow_cntrl, 0444, show_flow_cntrl, NULL);
+ static struct attribute *uim_attrs[] = {
+       &ldisc_install.attr,
+diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
+index c032eef..16a2a74 100644
+--- a/drivers/mmc/card/mmc_test.c
++++ b/drivers/mmc/card/mmc_test.c
+@@ -2076,8 +2076,8 @@ static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+ {
+       int ret = 0;
+       int i;
+-      void *pre_req = test->card->host->ops->pre_req;
+-      void *post_req = test->card->host->ops->post_req;
++      void (*pre_req)(struct mmc_host *, struct mmc_request *, bool) = test->card->host->ops->pre_req;
++      void (*post_req)(struct mmc_host *, struct mmc_request *, int) = test->card->host->ops->post_req;
+       if (rw->do_nonblock_req &&
+           ((!pre_req && post_req) || (pre_req && !post_req))) {
+diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
+index e8cd2de..c1640f6 100644
+--- a/drivers/mmc/host/dw_mmc.h
++++ b/drivers/mmc/host/dw_mmc.h
+@@ -298,5 +298,5 @@ struct dw_mci_drv_data {
+                                               struct mmc_ios *ios);
+       int             (*switch_voltage)(struct mmc_host *mmc,
+                                         struct mmc_ios *ios);
+-};
++} __do_const;
+ #endif /* _DW_MMC_H_ */
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index df990bb..e647253 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1613,7 +1613,9 @@ static int mmci_probe(struct amba_device *dev,
+       mmc->caps |= MMC_CAP_CMD23;
+       if (variant->busy_detect) {
+-              mmci_ops.card_busy = mmci_card_busy;
++              pax_open_kernel();
++              const_cast(mmci_ops.card_busy) = mmci_card_busy;
++              pax_close_kernel();
+               mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+               mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+               mmc->max_busy_timeout = 0;
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index 5f2f24a..e80f6f3 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -2076,7 +2076,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+       if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
+               dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
+-              omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
++              pax_open_kernel();
++              const_cast(omap_hsmmc_ops.multi_io_quirk) = omap_hsmmc_multi_io_quirk;
++              pax_close_kernel();
+       }
+       device_init_wakeup(&pdev->dev, true);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 99e0b33..107a2cc 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1231,9 +1231,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+               writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+       }
+-      if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+-              sdhci_esdhc_ops.platform_execute_tuning =
++      if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
++              pax_open_kernel();
++              const_cast(sdhci_esdhc_ops.platform_execute_tuning) =
+                                       esdhc_executing_tuning;
++              pax_close_kernel();
++      }
+       if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
+               host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 784c5a8..3567328 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
+        * we can use overriding functions instead of default.
+        */
+       if (sc->no_divider) {
+-              sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+-              sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+-              sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
++              pax_open_kernel();
++              const_cast(sdhci_s3c_ops.set_clock) = sdhci_cmu_set_clock;
++              const_cast(sdhci_s3c_ops.get_min_clock) = sdhci_cmu_get_min_clock;
++              const_cast(sdhci_s3c_ops.get_max_clock) = sdhci_cmu_get_max_clock;
++              pax_close_kernel();
+       }
+       /* It supports additional host capabilities if needed */
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index 92467ef..cb90505 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -1072,7 +1072,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
+               goto host_free;
+       }
+-      tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
++      pax_open_kernel();
++      const_cast(tmio_mmc_ops.start_signal_voltage_switch) = _host->start_signal_voltage_switch;
++      pax_close_kernel();
+       mmc->ops = &tmio_mmc_ops;
+       mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
+index 94d3eb4..7d34296 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0020.c
++++ b/drivers/mtd/chips/cfi_cmdset_0020.c
+@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+       size_t   totlen = 0, thislen;
+       int      ret = 0;
+       size_t   buflen = 0;
+-      static char *buffer;
++      char *buffer;
+       if (!ECCBUF_SIZE) {
+               /* We should fall back to a general writev implementation.
+diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
+index 7c887f1..62fd690 100644
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -431,7 +431,7 @@ static int block2mtd_setup2(const char *val)
+ }
+-static int block2mtd_setup(const char *val, struct kernel_param *kp)
++static int block2mtd_setup(const char *val, const struct kernel_param *kp)
+ {
+ #ifdef MODULE
+       return block2mtd_setup2(val);
+diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
+index 8b66e52..7287696 100644
+--- a/drivers/mtd/devices/phram.c
++++ b/drivers/mtd/devices/phram.c
+@@ -266,7 +266,7 @@ static int phram_setup(const char *val)
+       return ret;
+ }
+-static int phram_param_call(const char *val, struct kernel_param *kp)
++static int phram_param_call(const char *val, const struct kernel_param *kp)
+ {
+ #ifdef MODULE
+       return phram_setup(val);
+diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
+index 385305e..8051e87 100644
+--- a/drivers/mtd/maps/gpio-addr-flash.c
++++ b/drivers/mtd/maps/gpio-addr-flash.c
+@@ -128,7 +128,7 @@ static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssi
+  *    @map: MTD map state
+  *    @ofs: desired offset to write
+  */
+-static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
++static void gf_write(struct map_info *map, const map_word d1, unsigned long ofs)
+ {
+       struct async_state *state = gf_map_info_to_state(map);
+       uint16_t d;
+diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
+index 6dc97aa..c251b90 100644
+--- a/drivers/mtd/maps/latch-addr-flash.c
++++ b/drivers/mtd/maps/latch-addr-flash.c
+@@ -52,7 +52,7 @@ static map_word lf_read(struct map_info *map, unsigned long ofs)
+       return datum;
+ }
+-static void lf_write(struct map_info *map, map_word datum, unsigned long ofs)
++static void lf_write(struct map_info *map, const map_word datum, unsigned long ofs)
+ {
+       struct latch_addr_flash_info *info;
+diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
+index eb0242e..1a4c5b9 100644
+--- a/drivers/mtd/maps/pci.c
++++ b/drivers/mtd/maps/pci.c
+@@ -59,13 +59,13 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from
+       memcpy_fromio(to, map->base + map->translate(map, from), len);
+ }
+-static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
++static void mtd_pci_write8(struct map_info *_map, const map_word val, unsigned long ofs)
+ {
+       struct map_pci_info *map = (struct map_pci_info *)_map;
+       writeb(val.x[0], map->base + map->translate(map, ofs));
+ }
+-static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
++static void mtd_pci_write32(struct map_info *_map, const map_word val, unsigned long ofs)
+ {
+       struct map_pci_info *map = (struct map_pci_info *)_map;
+       writel(val.x[0], map->base + map->translate(map, ofs));
+diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
+index 70bb403..3ae94c6 100644
+--- a/drivers/mtd/maps/pcmciamtd.c
++++ b/drivers/mtd/maps/pcmciamtd.c
+@@ -161,7 +161,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
+ }
+-static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
++static void pcmcia_write8_remap(struct map_info *map, const map_word d, unsigned long adr)
+ {
+       void __iomem *addr = remap_window(map, adr);
+@@ -173,7 +173,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
+ }
+-static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
++static void pcmcia_write16_remap(struct map_info *map, const map_word d, unsigned long adr)
+ {
+       void __iomem *addr = remap_window(map, adr);
+       if(!addr)
+@@ -256,7 +256,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
+ }
+-static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
++static void pcmcia_write8(struct map_info *map, const map_word d, unsigned long adr)
+ {
+       void __iomem *win_base = (void __iomem *)map->map_priv_2;
+@@ -269,7 +269,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
+ }
+-static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
++static void pcmcia_write16(struct map_info *map, const map_word d, unsigned long adr)
+ {
+       void __iomem *win_base = (void __iomem *)map->map_priv_2;
+diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
+index 556a2df..e771329 100644
+--- a/drivers/mtd/maps/sbc_gxx.c
++++ b/drivers/mtd/maps/sbc_gxx.c
+@@ -138,7 +138,7 @@ static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from
+       }
+ }
+-static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
++static void sbc_gxx_write8(struct map_info *map, const map_word d, unsigned long adr)
+ {
+       spin_lock(&sbc_gxx_spin);
+       sbc_gxx_page(map, adr);
+diff --git a/drivers/mtd/nand/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/brcmnand/bcm63138_nand.c
+index 59444b3..b8fd6d5 100644
+--- a/drivers/mtd/nand/brcmnand/bcm63138_nand.c
++++ b/drivers/mtd/nand/brcmnand/bcm63138_nand.c
+@@ -81,8 +81,10 @@ static int bcm63138_nand_probe(struct platform_device *pdev)
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
++      pax_open_kernel();
+       soc->ctlrdy_ack = bcm63138_nand_intc_ack;
+       soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
++      pax_close_kernel();
+       return brcmnand_probe(pdev, soc);
+ }
+diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h b/drivers/mtd/nand/brcmnand/brcmnand.h
+index ef5eabb..2b61d03 100644
+--- a/drivers/mtd/nand/brcmnand/brcmnand.h
++++ b/drivers/mtd/nand/brcmnand/brcmnand.h
+@@ -24,7 +24,7 @@ struct brcmnand_soc {
+       bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
+       void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
+       void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare);
+-};
++} __no_const;
+ static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc)
+ {
+diff --git a/drivers/mtd/nand/brcmnand/iproc_nand.c b/drivers/mtd/nand/brcmnand/iproc_nand.c
+index 585596c..da877c2 100644
+--- a/drivers/mtd/nand/brcmnand/iproc_nand.c
++++ b/drivers/mtd/nand/brcmnand/iproc_nand.c
+@@ -120,9 +120,11 @@ static int iproc_nand_probe(struct platform_device *pdev)
+       if (IS_ERR(priv->ext_base))
+               return PTR_ERR(priv->ext_base);
++      pax_open_kernel();
+       soc->ctlrdy_ack = iproc_nand_intc_ack;
+       soc->ctlrdy_set_enabled = iproc_nand_intc_set;
+       soc->prepare_data_bus = iproc_nand_apb_access;
++      pax_close_kernel();
+       return brcmnand_probe(pdev, soc);
+ }
+diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
+index 0b0c937..e3a9cca 100644
+--- a/drivers/mtd/nand/cafe_nand.c
++++ b/drivers/mtd/nand/cafe_nand.c
+@@ -345,7 +345,17 @@ static irqreturn_t cafe_nand_interrupt(int irq, void *id)
+       return IRQ_HANDLED;
+ }
+-static void cafe_nand_bug(struct mtd_info *mtd)
++static void cafe_nand_bug_hwctl(struct mtd_info *mtd, int mode)
++{
++      BUG();
++}
++
++static int cafe_nand_bug_calculate(struct mtd_info *mtd, const uint8_t *dat, uint8_t *ecc_code)
++{
++      BUG();
++}
++
++static int cafe_nand_bug_correct(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
+ {
+       BUG();
+ }
+@@ -780,9 +790,9 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+       cafe->nand.ecc.size = mtd->writesize;
+       cafe->nand.ecc.bytes = 14;
+       cafe->nand.ecc.strength = 4;
+-      cafe->nand.ecc.hwctl  = (void *)cafe_nand_bug;
+-      cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
+-      cafe->nand.ecc.correct  = (void *)cafe_nand_bug;
++      cafe->nand.ecc.hwctl  = cafe_nand_bug_hwctl;
++      cafe->nand.ecc.calculate = cafe_nand_bug_calculate;
++      cafe->nand.ecc.correct  = cafe_nand_bug_correct;
+       cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+       cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+       cafe->nand.ecc.read_page = cafe_nand_read_page;
+diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
+index 0476ae8..8d320ef 100644
+--- a/drivers/mtd/nand/denali.c
++++ b/drivers/mtd/nand/denali.c
+@@ -24,6 +24,7 @@
+ #include <linux/slab.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+ #include "denali.h"
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+index 6e46156..923c436 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+@@ -414,7 +414,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+       /* first try to map the upper buffer directly */
+       if (virt_addr_valid(this->upper_buf) &&
+-              !object_is_on_stack(this->upper_buf)) {
++              !object_starts_on_stack(this->upper_buf)) {
+               sg_init_one(sgl, this->upper_buf, this->upper_len);
+               ret = dma_map_sg(this->dev, sgl, 1, dr);
+               if (ret == 0)
+diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
+index a5dfbfb..8042ab4 100644
+--- a/drivers/mtd/nftlmount.c
++++ b/drivers/mtd/nftlmount.c
+@@ -24,6 +24,7 @@
+ #include <asm/errno.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/nand.h>
+ #include <linux/mtd/nftl.h>
+diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
+index 3692dd5..b731a9b 100644
+--- a/drivers/mtd/sm_ftl.c
++++ b/drivers/mtd/sm_ftl.c
+@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ #define SM_CIS_VENDOR_OFFSET 0x59
+ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+ {
+-      struct attribute_group *attr_group;
++      attribute_group_no_const *attr_group;
+       struct attribute **attributes;
+       struct sm_sysfs_attribute *vendor_attribute;
+       char *vendor;
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 0680516..eb890f3 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1389,7 +1389,7 @@ static int __init bytes_str_to_int(const char *str)
+  * This function returns zero in case of success and a negative error code in
+  * case of error.
+  */
+-static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
++static int __init ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
+ {
+       int i, len;
+       struct mtd_dev_param *p;
+diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
+index b8df0f5..0d64b6e 100644
+--- a/drivers/net/bonding/bond_netlink.c
++++ b/drivers/net/bonding/bond_netlink.c
+@@ -666,7 +666,7 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+-struct rtnl_link_ops bond_link_ops __read_mostly = {
++struct rtnl_link_ops bond_link_ops = {
+       .kind                   = "bond",
+       .priv_size              = sizeof(struct bonding),
+       .setup                  = bond_setup,
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index ddabce7..6583c29 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -1011,7 +1011,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
+       cfhsi_start_tx(cfhsi);
+ }
+-static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct cfhsi *cfhsi = NULL;
+       int start_xfer = 0;
+@@ -1441,7 +1441,7 @@ err:
+       return -ENODEV;
+ }
+-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
++static struct rtnl_link_ops caif_hsi_link_ops = {
+       .kind           = "cfhsi",
+       .priv_size      = sizeof(struct cfhsi),
+       .setup          = cfhsi_setup,
+diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
+index c2dea49..4bf83b5 100644
+--- a/drivers/net/caif/caif_serial.c
++++ b/drivers/net/caif/caif_serial.c
+@@ -277,7 +277,7 @@ error:
+       return tty_wr;
+ }
+-static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ser_device *ser;
+diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
+index 3a529fb..c55ad5e 100644
+--- a/drivers/net/caif/caif_spi.c
++++ b/drivers/net/caif/caif_spi.c
+@@ -486,7 +486,7 @@ static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
+       complete(&cfspi->comp);
+ }
+-static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct cfspi *cfspi = NULL;
+       unsigned long flags;
+diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
+index b306210..c5345de 100644
+--- a/drivers/net/caif/caif_virtio.c
++++ b/drivers/net/caif/caif_virtio.c
+@@ -519,7 +519,7 @@ err:
+ }
+ /* Put the CAIF packet on the virtio ring and kick the receiver */
+-static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct cfv_info *cfv = netdev_priv(netdev);
+       struct buf_info *buf_info;
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index 22570ea..c462375 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -81,7 +81,7 @@ config CAN_BFIN
+ config CAN_FLEXCAN
+       tristate "Support for Freescale FLEXCAN based chips"
+-      depends on ARM || PPC
++      depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
+       ---help---
+         Say Y here if you want to support for Freescale FlexCAN.
+diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
+index 1deb8ff..4e2b0c1 100644
+--- a/drivers/net/can/bfin_can.c
++++ b/drivers/net/can/bfin_can.c
+@@ -338,7 +338,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev,
+       return 0;
+ }
+-static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct bfin_can_priv *priv = netdev_priv(dev);
+       struct bfin_can_regs __iomem *reg = priv->membase;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 8d6208c..7731e3c 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -1053,7 +1053,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
+       return;
+ }
+-static struct rtnl_link_ops can_link_ops __read_mostly = {
++static struct rtnl_link_ops can_link_ops = {
+       .kind           = "can",
+       .maxtype        = IFLA_CAN_MAX,
+       .policy         = can_policy,
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 16f7cad..e643cf4 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -465,7 +465,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
+       return err;
+ }
+-static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
+diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
+index f13bb8d..26e4a44 100644
+--- a/drivers/net/can/janz-ican3.c
++++ b/drivers/net/can/janz-ican3.c
+@@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev)
+       return 0;
+ }
+-static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct ican3_dev *mod = netdev_priv(ndev);
+       struct can_frame *cf = (struct can_frame *)skb->data;
+diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
+index c1b6676..50a8a51 100644
+--- a/drivers/net/can/led.c
++++ b/drivers/net/can/led.c
+@@ -128,7 +128,7 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
+ }
+ /* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
++static struct notifier_block can_netdev_notifier = {
+       .notifier_call = can_led_notifier,
+ };
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index 68ef0a4..9e4938b 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode)
+  * xx xx xx xx         ff         ll 00 11 22 33 44 55 66 77
+  * [ can_id ] [flags] [len] [can data (up to 8 bytes]
+  */
+-static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct sun4ican_priv *priv = netdev_priv(dev);
+       struct can_frame *cf = (struct can_frame *)skb->data;
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 674f367..ec3a31f 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
+       dev->destructor         = free_netdev;
+ }
+-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
++static struct rtnl_link_ops vcan_link_ops = {
+       .kind   = "vcan",
+       .setup  = vcan_setup,
+ };
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index c71a035..08768ce 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -386,7 +386,7 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+  *
+  * Return: 0 on success and failure value on error
+  */
+-static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct xcan_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index 69fc840..77a32fc 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -167,7 +167,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
+       return 0;
+ }
+-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
++static struct rtnl_link_ops dummy_link_ops = {
+       .kind           = DRV_NAME,
+       .setup          = dummy_setup,
+       .validate       = dummy_validate,
+diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
+index 39ca935..bd14a10 100644
+--- a/drivers/net/ethernet/8390/ax88796.c
++++ b/drivers/net/ethernet/8390/ax88796.c
+@@ -808,7 +808,7 @@ static int ax_probe(struct platform_device *pdev)
+       struct ei_device *ei_local;
+       struct ax_device *ax;
+       struct resource *irq, *mem, *mem2;
+-      unsigned long mem_size, mem2_size = 0;
++      resource_size_t mem_size, mem2_size = 0;
+       int ret = 0;
+       dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
+@@ -852,9 +852,11 @@ static int ax_probe(struct platform_device *pdev)
+       if (ax->plat->reg_offsets)
+               ei_local->reg_offset = ax->plat->reg_offsets;
+       else {
++              resource_size_t _mem_size = mem_size;
++              _mem_size /= 0x18;
+               ei_local->reg_offset = ax->reg_offsets;
+               for (ret = 0; ret < 0x18; ret++)
+-                      ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
++                      ax->reg_offsets[ret] = _mem_size * ret;
+       }
+       if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
+index 4ea717d..549ae69 100644
+--- a/drivers/net/ethernet/8390/axnet_cs.c
++++ b/drivers/net/ethernet/8390/axnet_cs.c
+@@ -96,7 +96,7 @@ static void get_8390_hdr(struct net_device *,
+ static void block_input(struct net_device *dev, int count,
+                       struct sk_buff *skb, int ring_offset);
+ static void block_output(struct net_device *dev, int count,
+-                       const u_char *buf, const int start_page);
++                       const u_char *buf, int start_page);
+ static void axnet_detach(struct pcmcia_device *p_dev);
+@@ -667,7 +667,7 @@ static void block_input(struct net_device *dev, int count,
+ /*====================================================================*/
+ static void block_output(struct net_device *dev, int count,
+-                       const u_char *buf, const int start_page)
++                       const u_char *buf, int start_page)
+ {
+     unsigned int nic_base = dev->base_addr;
+diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
+index 57e9791..c93b6a0 100644
+--- a/drivers/net/ethernet/8390/ne2k-pci.c
++++ b/drivers/net/ethernet/8390/ne2k-pci.c
+@@ -172,8 +172,8 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
+                         int ring_page);
+ static void ne2k_pci_block_input(struct net_device *dev, int count,
+                         struct sk_buff *skb, int ring_offset);
+-static void ne2k_pci_block_output(struct net_device *dev, const int count,
+-              const unsigned char *buf, const int start_page);
++static void ne2k_pci_block_output(struct net_device *dev, int count,
++              const unsigned char *buf, int start_page);
+ static const struct ethtool_ops ne2k_pci_ethtool_ops;
+@@ -563,7 +563,7 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
+ }
+ static void ne2k_pci_block_output(struct net_device *dev, int count,
+-                                const unsigned char *buf, const int start_page)
++                                const unsigned char *buf, int start_page)
+ {
+       long nic_base = NE_BASE;
+       unsigned long dma_start;
+diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
+index 2f79d29..ed5a64e 100644
+--- a/drivers/net/ethernet/8390/pcnet_cs.c
++++ b/drivers/net/ethernet/8390/pcnet_cs.c
+@@ -1208,7 +1208,7 @@ static void dma_block_input(struct net_device *dev, int count,
+ /*====================================================================*/
+ static void dma_block_output(struct net_device *dev, int count,
+-                           const u_char *buf, const int start_page)
++                           const u_char *buf, int start_page)
+ {
+     unsigned int nic_base = dev->base_addr;
+     struct pcnet_dev *info = PRIV(dev);
+@@ -1387,7 +1387,7 @@ static void shmem_block_input(struct net_device *dev, int count,
+ /*====================================================================*/
+ static void shmem_block_output(struct net_device *dev, int count,
+-                             const u_char *buf, const int start_page)
++                             const u_char *buf, int start_page)
+ {
+     void __iomem *shmem = ei_status.mem + (start_page << 8);
+     shmem -= ei_status.tx_start_page << 8;
+diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
+index 38eaea1..49e5aaa 100644
+--- a/drivers/net/ethernet/adi/bfin_mac.c
++++ b/drivers/net/ethernet/adi/bfin_mac.c
+@@ -1097,7 +1097,7 @@ static void tx_reclaim_skb_timeout(unsigned long lp)
+       tx_reclaim_skb((struct bfin_mac_local *)lp);
+ }
+-static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
++static netdev_tx_t bfin_mac_hard_start_xmit(struct sk_buff *skb,
+                               struct net_device *dev)
+ {
+       struct bfin_mac_local *lp = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
+index 6ffdff6..8b96f60 100644
+--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
+@@ -412,7 +412,7 @@ static void emac_timeout(struct net_device *dev)
+ /* Hardware start transmission.
+  * Send a packet to media from the upper layer.
+  */
+-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct emac_board_info *db = netdev_priv(dev);
+       unsigned long channel;
+diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
+index bda31f3..55cfc6e 100644
+--- a/drivers/net/ethernet/altera/altera_tse_main.c
++++ b/drivers/net/ethernet/altera/altera_tse_main.c
+@@ -551,7 +551,7 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
+  * physically contiguous fragment starting at
+  * skb->data, for length of skb_headlen(skb).
+  */
+-static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct altera_tse_private *priv = netdev_priv(dev);
+       unsigned int txsize = priv->tx_ring_size;
+@@ -1243,7 +1243,7 @@ static int tse_shutdown(struct net_device *dev)
+       return 0;
+ }
+-static struct net_device_ops altera_tse_netdev_ops = {
++static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
+       .ndo_open               = tse_open,
+       .ndo_stop               = tse_shutdown,
+       .ndo_start_xmit         = tse_start_xmit,
+@@ -1480,11 +1480,13 @@ static int altera_tse_probe(struct platform_device *pdev)
+       ndev->netdev_ops = &altera_tse_netdev_ops;
+       altera_tse_set_ethtool_ops(ndev);
++      pax_open_kernel();
+       altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
+       if (priv->hash_filter)
+               altera_tse_netdev_ops.ndo_set_rx_mode =
+                       tse_set_rx_mode_hashfilter;
++      pax_close_kernel();
+       /* Scatter/gather IO is not supported,
+        * so it is turned off
+diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
+index dcf2a1f..ec0c782 100644
+--- a/drivers/net/ethernet/amd/7990.c
++++ b/drivers/net/ethernet/amd/7990.c
+@@ -535,7 +535,7 @@ void lance_tx_timeout(struct net_device *dev)
+ }
+ EXPORT_SYMBOL_GPL(lance_tx_timeout);
+-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
++netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct lance_private *lp = netdev_priv(dev);
+       volatile struct lance_init_block *ib = lp->init_block;
+diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
+index e9e0be3..1b8e3af 100644
+--- a/drivers/net/ethernet/amd/7990.h
++++ b/drivers/net/ethernet/amd/7990.h
+@@ -240,7 +240,7 @@ struct lance_private {
+ /* Now the prototypes we export */
+ int lance_open(struct net_device *dev);
+ int lance_close(struct net_device *dev);
+-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
++netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ void lance_set_multicast(struct net_device *dev);
+ void lance_tx_timeout(struct net_device *dev);
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
+index 9496005..1fb7ac2 100644
+--- a/drivers/net/ethernet/amd/amd8111e.c
++++ b/drivers/net/ethernet/amd/amd8111e.c
+@@ -1690,8 +1690,9 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
+       return 0;
+ }
+-static void amd8111e_config_ipg(struct net_device *dev)
++static void amd8111e_config_ipg(unsigned long _dev)
+ {
++      struct net_device *dev = (struct net_device *)_dev;
+       struct amd8111e_priv *lp = netdev_priv(dev);
+       struct ipg_info *ipg_data = &lp->ipg_data;
+       void __iomem *mmio = lp->mmio;
+@@ -1904,7 +1905,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
+       if(lp->options & OPTION_DYN_IPG_ENABLE){
+               init_timer(&lp->ipg_data.ipg_timer);
+               lp->ipg_data.ipg_timer.data = (unsigned long) dev;
+-              lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
++              lp->ipg_data.ipg_timer.function = &amd8111e_config_ipg;
+               lp->ipg_data.ipg_timer.expires = jiffies +
+                                                IPG_CONVERGE_JIFFIES;
+               lp->ipg_data.ipg = DEFAULT_IPG;
+diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
+index d2bc8e5..2285a75 100644
+--- a/drivers/net/ethernet/amd/atarilance.c
++++ b/drivers/net/ethernet/amd/atarilance.c
+@@ -339,7 +339,7 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
+                                    *init_rec );
+ static int lance_open( struct net_device *dev );
+ static void lance_init_ring( struct net_device *dev );
+-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
++static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+ static irqreturn_t lance_interrupt( int irq, void *dev_id );
+ static int lance_rx( struct net_device *dev );
+ static int lance_close( struct net_device *dev );
+@@ -770,7 +770,7 @@ static void lance_tx_timeout (struct net_device *dev)
+ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
++static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+ {
+       struct lance_private *lp = netdev_priv(dev);
+       struct lance_ioreg       *IO = lp->iobase;
+diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
+index b799c7a..58bd5b3 100644
+--- a/drivers/net/ethernet/amd/declance.c
++++ b/drivers/net/ethernet/amd/declance.c
+@@ -893,7 +893,7 @@ static void lance_tx_timeout(struct net_device *dev)
+       netif_wake_queue(dev);
+ }
+-static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct lance_private *lp = netdev_priv(dev);
+       volatile struct lance_regs *ll = lp->ll;
+diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
+index c22bf52..a83f5f6 100644
+--- a/drivers/net/ethernet/amd/pcnet32.c
++++ b/drivers/net/ethernet/amd/pcnet32.c
+@@ -318,7 +318,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+ static void pcnet32_load_multicast(struct net_device *dev);
+ static void pcnet32_set_multicast_list(struct net_device *);
+ static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+-static void pcnet32_watchdog(struct net_device *);
++static void pcnet32_watchdog(unsigned long);
+ static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+ static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+                      int val);
+@@ -1915,7 +1915,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+       init_timer(&lp->watchdog_timer);
+       lp->watchdog_timer.data = (unsigned long)dev;
+-      lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
++      lp->watchdog_timer.function = &pcnet32_watchdog;
+       /* The PCNET32-specific entries in the device structure. */
+       dev->netdev_ops = &pcnet32_netdev_ops;
+@@ -2837,8 +2837,9 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
+  * Could possibly be changed to use mii_check_media instead.
+  */
+-static void pcnet32_watchdog(struct net_device *dev)
++static void pcnet32_watchdog(unsigned long _dev)
+ {
++      struct net_device *dev = (struct net_device *)_dev;
+       struct pcnet32_private *lp = netdev_priv(dev);
+       unsigned long flags;
+diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
+index 3d8c6b2..35160ad 100644
+--- a/drivers/net/ethernet/amd/sun3lance.c
++++ b/drivers/net/ethernet/amd/sun3lance.c
+@@ -235,7 +235,7 @@ struct lance_private {
+ static int lance_probe( struct net_device *dev);
+ static int lance_open( struct net_device *dev );
+ static void lance_init_ring( struct net_device *dev );
+-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
++static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+ static irqreturn_t lance_interrupt( int irq, void *dev_id);
+ static int lance_rx( struct net_device *dev );
+ static int lance_close( struct net_device *dev );
+@@ -511,7 +511,7 @@ static void lance_init_ring( struct net_device *dev )
+ }
+-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
++static netdev_tx_t lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+ {
+       struct lance_private *lp = netdev_priv(dev);
+       int entry, len;
+diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
+index 9b56b40..f183a4d 100644
+--- a/drivers/net/ethernet/amd/sunlance.c
++++ b/drivers/net/ethernet/amd/sunlance.c
+@@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev)
+       netif_wake_queue(dev);
+ }
+-static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct lance_private *lp = netdev_priv(dev);
+       int entry, skblen, len;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index bbef959..999ab1d 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1283,14 +1283,14 @@ do {                                                                   \
+  * operations, everything works on mask values.
+  */
+ #define XMDIO_READ(_pdata, _mmd, _reg)                                        \
+-      ((_pdata)->hw_if.read_mmd_regs((_pdata), 0,                     \
++      ((_pdata)->hw_if->read_mmd_regs((_pdata), 0,                    \
+               MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+ #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask)                    \
+       (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+ #define XMDIO_WRITE(_pdata, _mmd, _reg, _val)                         \
+-      ((_pdata)->hw_if.write_mmd_regs((_pdata), 0,                    \
++      ((_pdata)->hw_if->write_mmd_regs((_pdata), 0,                   \
+               MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+ #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val)             \
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+index 895d356..b1c866e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+@@ -202,7 +202,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+       pdata->num_tcs = max_tc + 1;
+       memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+-      pdata->hw_if.config_dcb_tc(pdata);
++      pdata->hw_if->config_dcb_tc(pdata);
+       return 0;
+ }
+@@ -249,7 +249,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+       memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+-      pdata->hw_if.config_dcb_pfc(pdata);
++      pdata->hw_if->config_dcb_pfc(pdata);
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index b3bc87f..5bdfdd3 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -353,7 +353,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_channel *channel;
+       struct xgbe_ring *ring;
+       struct xgbe_ring_data *rdata;
+@@ -394,7 +394,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_channel *channel;
+       struct xgbe_ring *ring;
+       struct xgbe_ring_desc *rdesc;
+@@ -628,17 +628,12 @@ err_out:
+       return 0;
+ }
+-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+-{
+-      DBGPR("-->xgbe_init_function_ptrs_desc\n");
+-
+-      desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+-      desc_if->free_ring_resources = xgbe_free_ring_resources;
+-      desc_if->map_tx_skb = xgbe_map_tx_skb;
+-      desc_if->map_rx_buffer = xgbe_map_rx_buffer;
+-      desc_if->unmap_rdata = xgbe_unmap_rdata;
+-      desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+-      desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+-
+-      DBGPR("<--xgbe_init_function_ptrs_desc\n");
+-}
++const struct xgbe_desc_if default_xgbe_desc_if = {
++      .alloc_ring_resources = xgbe_alloc_ring_resources,
++      .free_ring_resources = xgbe_free_ring_resources,
++      .map_tx_skb = xgbe_map_tx_skb,
++      .map_rx_buffer = xgbe_map_rx_buffer,
++      .unmap_rdata = xgbe_unmap_rdata,
++      .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
++      .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
++};
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 1babcc1..aa7f8f4e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -2816,7 +2816,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+ static int xgbe_init(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       int ret;
+       DBGPR("-->xgbe_init\n");
+@@ -2882,107 +2882,102 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+       return 0;
+ }
+-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+-{
+-      DBGPR("-->xgbe_init_function_ptrs\n");
+-
+-      hw_if->tx_complete = xgbe_tx_complete;
+-
+-      hw_if->set_mac_address = xgbe_set_mac_address;
+-      hw_if->config_rx_mode = xgbe_config_rx_mode;
+-
+-      hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+-      hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+-
+-      hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+-      hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+-      hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+-      hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+-      hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
+-
+-      hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+-      hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+-
+-      hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+-      hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+-      hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+-
+-      hw_if->enable_tx = xgbe_enable_tx;
+-      hw_if->disable_tx = xgbe_disable_tx;
+-      hw_if->enable_rx = xgbe_enable_rx;
+-      hw_if->disable_rx = xgbe_disable_rx;
+-
+-      hw_if->powerup_tx = xgbe_powerup_tx;
+-      hw_if->powerdown_tx = xgbe_powerdown_tx;
+-      hw_if->powerup_rx = xgbe_powerup_rx;
+-      hw_if->powerdown_rx = xgbe_powerdown_rx;
+-
+-      hw_if->dev_xmit = xgbe_dev_xmit;
+-      hw_if->dev_read = xgbe_dev_read;
+-      hw_if->enable_int = xgbe_enable_int;
+-      hw_if->disable_int = xgbe_disable_int;
+-      hw_if->init = xgbe_init;
+-      hw_if->exit = xgbe_exit;
++const struct xgbe_hw_if default_xgbe_hw_if = {
++      .tx_complete = xgbe_tx_complete,
++
++      .set_mac_address = xgbe_set_mac_address,
++      .config_rx_mode = xgbe_config_rx_mode,
++
++      .enable_rx_csum = xgbe_enable_rx_csum,
++      .disable_rx_csum = xgbe_disable_rx_csum,
++
++      .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
++      .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
++      .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
++      .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
++      .update_vlan_hash_table = xgbe_update_vlan_hash_table,
++
++      .read_mmd_regs = xgbe_read_mmd_regs,
++      .write_mmd_regs = xgbe_write_mmd_regs,
++
++      .set_gmii_speed = xgbe_set_gmii_speed,
++      .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
++      .set_xgmii_speed = xgbe_set_xgmii_speed,
++
++      .enable_tx = xgbe_enable_tx,
++      .disable_tx = xgbe_disable_tx,
++      .enable_rx = xgbe_enable_rx,
++      .disable_rx = xgbe_disable_rx,
++
++      .powerup_tx = xgbe_powerup_tx,
++      .powerdown_tx = xgbe_powerdown_tx,
++      .powerup_rx = xgbe_powerup_rx,
++      .powerdown_rx = xgbe_powerdown_rx,
++
++      .dev_xmit = xgbe_dev_xmit,
++      .dev_read = xgbe_dev_read,
++      .enable_int = xgbe_enable_int,
++      .disable_int = xgbe_disable_int,
++      .init = xgbe_init,
++      .exit = xgbe_exit,
+       /* Descriptor related Sequences have to be initialized here */
+-      hw_if->tx_desc_init = xgbe_tx_desc_init;
+-      hw_if->rx_desc_init = xgbe_rx_desc_init;
+-      hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+-      hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+-      hw_if->is_last_desc = xgbe_is_last_desc;
+-      hw_if->is_context_desc = xgbe_is_context_desc;
+-      hw_if->tx_start_xmit = xgbe_tx_start_xmit;
++      .tx_desc_init = xgbe_tx_desc_init,
++      .rx_desc_init = xgbe_rx_desc_init,
++      .tx_desc_reset = xgbe_tx_desc_reset,
++      .rx_desc_reset = xgbe_rx_desc_reset,
++      .is_last_desc = xgbe_is_last_desc,
++      .is_context_desc = xgbe_is_context_desc,
++      .tx_start_xmit = xgbe_tx_start_xmit,
+       /* For FLOW ctrl */
+-      hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+-      hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
++      .config_tx_flow_control = xgbe_config_tx_flow_control,
++      .config_rx_flow_control = xgbe_config_rx_flow_control,
+       /* For RX coalescing */
+-      hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+-      hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+-      hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+-      hw_if->riwt_to_usec = xgbe_riwt_to_usec;
++      .config_rx_coalesce = xgbe_config_rx_coalesce,
++      .config_tx_coalesce = xgbe_config_tx_coalesce,
++      .usec_to_riwt = xgbe_usec_to_riwt,
++      .riwt_to_usec = xgbe_riwt_to_usec,
+       /* For RX and TX threshold config */
+-      hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+-      hw_if->config_tx_threshold = xgbe_config_tx_threshold;
++      .config_rx_threshold = xgbe_config_rx_threshold,
++      .config_tx_threshold = xgbe_config_tx_threshold,
+       /* For RX and TX Store and Forward Mode config */
+-      hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+-      hw_if->config_tsf_mode = xgbe_config_tsf_mode;
++      .config_rsf_mode = xgbe_config_rsf_mode,
++      .config_tsf_mode = xgbe_config_tsf_mode,
+       /* For TX DMA Operating on Second Frame config */
+-      hw_if->config_osp_mode = xgbe_config_osp_mode;
++      .config_osp_mode = xgbe_config_osp_mode,
+       /* For RX and TX PBL config */
+-      hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+-      hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+-      hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+-      hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+-      hw_if->config_pblx8 = xgbe_config_pblx8;
++      .config_rx_pbl_val = xgbe_config_rx_pbl_val,
++      .get_rx_pbl_val = xgbe_get_rx_pbl_val,
++      .config_tx_pbl_val = xgbe_config_tx_pbl_val,
++      .get_tx_pbl_val = xgbe_get_tx_pbl_val,
++      .config_pblx8 = xgbe_config_pblx8,
+       /* For MMC statistics support */
+-      hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+-      hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+-      hw_if->read_mmc_stats = xgbe_read_mmc_stats;
++      .tx_mmc_int = xgbe_tx_mmc_int,
++      .rx_mmc_int = xgbe_rx_mmc_int,
++      .read_mmc_stats = xgbe_read_mmc_stats,
+       /* For PTP config */
+-      hw_if->config_tstamp = xgbe_config_tstamp;
+-      hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+-      hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+-      hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+-      hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
++      .config_tstamp = xgbe_config_tstamp,
++      .update_tstamp_addend = xgbe_update_tstamp_addend,
++      .set_tstamp_time = xgbe_set_tstamp_time,
++      .get_tstamp_time = xgbe_get_tstamp_time,
++      .get_tx_tstamp = xgbe_get_tx_tstamp,
+       /* For Data Center Bridging config */
+-      hw_if->config_tc = xgbe_config_tc;
+-      hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+-      hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
++      .config_tc = xgbe_config_tc,
++      .config_dcb_tc = xgbe_config_dcb_tc,
++      .config_dcb_pfc = xgbe_config_dcb_pfc,
+       /* For Receive Side Scaling */
+-      hw_if->enable_rss = xgbe_enable_rss;
+-      hw_if->disable_rss = xgbe_disable_rss;
+-      hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+-      hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+-
+-      DBGPR("<--xgbe_init_function_ptrs\n");
+-}
++      .enable_rss = xgbe_enable_rss,
++      .disable_rss = xgbe_disable_rss,
++      .set_rss_hash_key = xgbe_set_rss_hash_key,
++      .set_rss_lookup_table = xgbe_set_rss_lookup_table,
++};
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index a9b2709..8cf92f1 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -245,7 +245,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+                * support, tell it now
+                */
+               if (ring->tx.xmit_more)
+-                      pdata->hw_if.tx_start_xmit(channel, ring);
++                      pdata->hw_if->tx_start_xmit(channel, ring);
+               return NETDEV_TX_BUSY;
+       }
+@@ -273,7 +273,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_channel *channel;
+       enum xgbe_int int_id;
+       unsigned int i;
+@@ -295,7 +295,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_channel *channel;
+       enum xgbe_int int_id;
+       unsigned int i;
+@@ -318,7 +318,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ static irqreturn_t xgbe_isr(int irq, void *data)
+ {
+       struct xgbe_prv_data *pdata = data;
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_channel *channel;
+       unsigned int dma_isr, dma_ch_isr;
+       unsigned int mac_isr, mac_tssr;
+@@ -447,7 +447,7 @@ static void xgbe_service(struct work_struct *work)
+                                                  struct xgbe_prv_data,
+                                                  service_work);
+-      pdata->phy_if.phy_status(pdata);
++      pdata->phy_if->phy_status(pdata);
+ }
+ static void xgbe_service_timer(unsigned long data)
+@@ -706,7 +706,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       DBGPR("-->xgbe_init_tx_coalesce\n");
+@@ -720,7 +720,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       DBGPR("-->xgbe_init_rx_coalesce\n");
+@@ -735,7 +735,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       struct xgbe_channel *channel;
+       struct xgbe_ring *ring;
+       struct xgbe_ring_data *rdata;
+@@ -760,7 +760,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       struct xgbe_channel *channel;
+       struct xgbe_ring *ring;
+       struct xgbe_ring_data *rdata;
+@@ -788,13 +788,13 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+       pdata->phy_link = -1;
+       pdata->phy_speed = SPEED_UNKNOWN;
+-      return pdata->phy_if.phy_reset(pdata);
++      return pdata->phy_if->phy_reset(pdata);
+ }
+ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       unsigned long flags;
+       DBGPR("-->xgbe_powerdown\n");
+@@ -833,7 +833,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       unsigned long flags;
+       DBGPR("-->xgbe_powerup\n");
+@@ -870,8 +870,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ static int xgbe_start(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
+-      struct xgbe_phy_if *phy_if = &pdata->phy_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
++      struct xgbe_phy_if *phy_if = pdata->phy_if;
+       struct net_device *netdev = pdata->netdev;
+       int ret;
+@@ -914,8 +914,8 @@ err_phy:
+ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ {
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
+-      struct xgbe_phy_if *phy_if = &pdata->phy_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
++      struct xgbe_phy_if *phy_if = pdata->phy_if;
+       struct xgbe_channel *channel;
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_queue *txq;
+@@ -1143,7 +1143,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+               return -ERANGE;
+       }
+-      pdata->hw_if.config_tstamp(pdata, mac_tscr);
++      pdata->hw_if->config_tstamp(pdata, mac_tscr);
+       memcpy(&pdata->tstamp_config, &config, sizeof(config));
+@@ -1292,7 +1292,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ static int xgbe_open(struct net_device *netdev)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       int ret;
+       DBGPR("-->xgbe_open\n");
+@@ -1364,7 +1364,7 @@ err_sysclk:
+ static int xgbe_close(struct net_device *netdev)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       DBGPR("-->xgbe_close\n");
+@@ -1388,11 +1388,11 @@ static int xgbe_close(struct net_device *netdev)
+       return 0;
+ }
+-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       struct xgbe_channel *channel;
+       struct xgbe_ring *ring;
+       struct xgbe_packet_data *packet;
+@@ -1461,7 +1461,7 @@ tx_netdev_return:
+ static void xgbe_set_rx_mode(struct net_device *netdev)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       DBGPR("-->xgbe_set_rx_mode\n");
+@@ -1473,7 +1473,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
+ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct sockaddr *saddr = addr;
+       DBGPR("-->xgbe_set_mac_address\n");
+@@ -1548,7 +1548,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+       DBGPR("-->%s\n", __func__);
+-      pdata->hw_if.read_mmc_stats(pdata);
++      pdata->hw_if->read_mmc_stats(pdata);
+       s->rx_packets = pstats->rxframecount_gb;
+       s->rx_bytes = pstats->rxoctetcount_gb;
+@@ -1575,7 +1575,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+                               u16 vid)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       DBGPR("-->%s\n", __func__);
+@@ -1591,7 +1591,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+                                u16 vid)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       DBGPR("-->%s\n", __func__);
+@@ -1641,7 +1641,7 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+               return -EINVAL;
+       pdata->num_tcs = tc;
+-      pdata->hw_if.config_tc(pdata);
++      pdata->hw_if->config_tc(pdata);
+       return 0;
+ }
+@@ -1650,7 +1650,7 @@ static int xgbe_set_features(struct net_device *netdev,
+                            netdev_features_t features)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+       int ret = 0;
+@@ -1716,8 +1716,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
+ static void xgbe_rx_refresh(struct xgbe_channel *channel)
+ {
+       struct xgbe_prv_data *pdata = channel->pdata;
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       struct xgbe_ring *ring = channel->rx_ring;
+       struct xgbe_ring_data *rdata;
+@@ -1794,8 +1794,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ static int xgbe_tx_poll(struct xgbe_channel *channel)
+ {
+       struct xgbe_prv_data *pdata = channel->pdata;
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
+-      struct xgbe_desc_if *desc_if = &pdata->desc_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
++      struct xgbe_desc_if *desc_if = pdata->desc_if;
+       struct xgbe_ring *ring = channel->tx_ring;
+       struct xgbe_ring_data *rdata;
+       struct xgbe_ring_desc *rdesc;
+@@ -1865,7 +1865,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+ {
+       struct xgbe_prv_data *pdata = channel->pdata;
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_ring *ring = channel->rx_ring;
+       struct xgbe_ring_data *rdata;
+       struct xgbe_packet_data *packet;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 11d9f0c..78767ab 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -206,7 +206,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
+       u8 *stat;
+       int i;
+-      pdata->hw_if.read_mmc_stats(pdata);
++      pdata->hw_if->read_mmc_stats(pdata);
+       for (i = 0; i < XGBE_STATS_COUNT; i++) {
+               stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+               *data++ = *(u64 *)stat;
+@@ -267,7 +267,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
+               pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+       if (netif_running(netdev))
+-              ret = pdata->phy_if.phy_config_aneg(pdata);
++              ret = pdata->phy_if->phy_config_aneg(pdata);
+       return ret;
+ }
+@@ -368,7 +368,7 @@ static int xgbe_set_settings(struct net_device *netdev,
+               pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+       if (netif_running(netdev))
+-              ret = pdata->phy_if.phy_config_aneg(pdata);
++              ret = pdata->phy_if->phy_config_aneg(pdata);
+       return ret;
+ }
+@@ -422,7 +422,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
+                            struct ethtool_coalesce *ec)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       unsigned int rx_frames, rx_riwt, rx_usecs;
+       unsigned int tx_frames;
+@@ -545,7 +545,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+ {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-      struct xgbe_hw_if *hw_if = &pdata->hw_if;
++      struct xgbe_hw_if *hw_if = pdata->hw_if;
+       unsigned int ret;
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 3eee320..4188681 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -202,13 +202,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
+       DBGPR("<--xgbe_default_config\n");
+ }
+-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+-{
+-      xgbe_init_function_ptrs_dev(&pdata->hw_if);
+-      xgbe_init_function_ptrs_phy(&pdata->phy_if);
+-      xgbe_init_function_ptrs_desc(&pdata->desc_if);
+-}
+-
+ #ifdef CONFIG_ACPI
+ static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+ {
+@@ -647,10 +640,12 @@ static int xgbe_probe(struct platform_device *pdev)
+       memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+       /* Set all the function pointers */
+-      xgbe_init_all_fptrs(pdata);
++      pdata->hw_if = &default_xgbe_hw_if;
++      pdata->phy_if = &default_xgbe_phy_if;
++      pdata->desc_if = &default_xgbe_desc_if;
+       /* Issue software reset to device */
+-      pdata->hw_if.exit(pdata);
++      pdata->hw_if->exit(pdata);
+       /* Populate the hardware features */
+       xgbe_get_all_hw_features(pdata);
+@@ -704,7 +699,7 @@ static int xgbe_probe(struct platform_device *pdev)
+       XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+       /* Call MDIO/PHY initialization routine */
+-      pdata->phy_if.phy_init(pdata);
++      pdata->phy_if->phy_init(pdata);
+       /* Set device operations */
+       netdev->netdev_ops = xgbe_get_netdev_ops();
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 84c5d29..697b4f2 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -202,7 +202,7 @@ static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+       xgbe_an_enable_kr_training(pdata);
+       /* Set MAC to 10G speed */
+-      pdata->hw_if.set_xgmii_speed(pdata);
++      pdata->hw_if->set_xgmii_speed(pdata);
+       /* Set PCS to KR/10G speed */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+@@ -250,7 +250,7 @@ static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+       xgbe_an_disable_kr_training(pdata);
+       /* Set MAC to 2.5G speed */
+-      pdata->hw_if.set_gmii_2500_speed(pdata);
++      pdata->hw_if->set_gmii_2500_speed(pdata);
+       /* Set PCS to KX/1G speed */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+@@ -298,7 +298,7 @@ static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+       xgbe_an_disable_kr_training(pdata);
+       /* Set MAC to 1G speed */
+-      pdata->hw_if.set_gmii_speed(pdata);
++      pdata->hw_if->set_gmii_speed(pdata);
+       /* Set PCS to KX/1G speed */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+@@ -877,13 +877,13 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+               if (pdata->tx_pause != pdata->phy.tx_pause) {
+                       new_state = 1;
+-                      pdata->hw_if.config_tx_flow_control(pdata);
++                      pdata->hw_if->config_tx_flow_control(pdata);
+                       pdata->tx_pause = pdata->phy.tx_pause;
+               }
+               if (pdata->rx_pause != pdata->phy.rx_pause) {
+                       new_state = 1;
+-                      pdata->hw_if.config_rx_flow_control(pdata);
++                      pdata->hw_if->config_rx_flow_control(pdata);
+                       pdata->rx_pause = pdata->phy.rx_pause;
+               }
+@@ -1348,14 +1348,13 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+               xgbe_dump_phy_registers(pdata);
+ }
+-void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+-{
+-      phy_if->phy_init        = xgbe_phy_init;
++const struct xgbe_phy_if default_xgbe_phy_if = {
++      .phy_init        = xgbe_phy_init,
+-      phy_if->phy_reset       = xgbe_phy_reset;
+-      phy_if->phy_start       = xgbe_phy_start;
+-      phy_if->phy_stop        = xgbe_phy_stop;
++      .phy_reset       = xgbe_phy_reset,
++      .phy_start       = xgbe_phy_start,
++      .phy_stop        = xgbe_phy_stop,
+-      phy_if->phy_status      = xgbe_phy_status;
+-      phy_if->phy_config_aneg = xgbe_phy_config_aneg;
+-}
++      .phy_status      = xgbe_phy_status,
++      .phy_config_aneg = xgbe_phy_config_aneg,
++};
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+index b03e4f5..78e4cc4 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+                                                  tstamp_cc);
+       u64 nsec;
+-      nsec = pdata->hw_if.get_tstamp_time(pdata);
++      nsec = pdata->hw_if->get_tstamp_time(pdata);
+       return nsec;
+ }
+@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+       spin_lock_irqsave(&pdata->tstamp_lock, flags);
+-      pdata->hw_if.update_tstamp_addend(pdata, addend);
++      pdata->hw_if->update_tstamp_addend(pdata, addend);
+       spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 98d9d63..3825a58 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -786,9 +786,9 @@ struct xgbe_prv_data {
+       int dev_irq;
+       unsigned int per_channel_irq;
+-      struct xgbe_hw_if hw_if;
+-      struct xgbe_phy_if phy_if;
+-      struct xgbe_desc_if desc_if;
++      struct xgbe_hw_if *hw_if;
++      struct xgbe_phy_if *phy_if;
++      struct xgbe_desc_if *desc_if;
+       /* AXI DMA settings */
+       unsigned int coherent;
+@@ -951,6 +951,10 @@ struct xgbe_prv_data {
+ #endif
+ };
++extern const struct xgbe_hw_if default_xgbe_hw_if;
++extern const struct xgbe_phy_if default_xgbe_phy_if;
++extern const struct xgbe_desc_if default_xgbe_desc_if;
++
+ /* Function prototypes*/
+ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+index d1d6b5e..19d6062 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -111,7 +111,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
+       }
+ }
+-static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
++static irqreturn_t xgene_enet_rx_irq(int irq, void *data)
+ {
+       struct xgene_enet_desc_ring *rx_ring = data;
+@@ -577,7 +577,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
+       return processed;
+ }
+-static int xgene_enet_napi(struct napi_struct *napi, const int budget)
++static int xgene_enet_napi(struct napi_struct *napi, int budget)
+ {
+       struct xgene_enet_desc_ring *ring;
+       int processed;
+diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
+index b0da969..1688f6b 100644
+--- a/drivers/net/ethernet/arc/emac_main.c
++++ b/drivers/net/ethernet/arc/emac_main.c
+@@ -608,7 +608,7 @@ static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
+  *
+  * This function is invoked from upper layers to initiate transmission.
+  */
+-static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       unsigned int len, *txbd_curr = &priv->txbd_curr;
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 4eb17da..5262e50 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1462,7 +1462,7 @@ static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+ static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+-                                             pci_channel_state_t state)
++                                             enum pci_channel_state state)
+ {
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index a3200ea..d02b523 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -2704,7 +2704,7 @@ static void atl1c_remove(struct pci_dev *pdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct atl1c_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 974713b..5e0112b 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -2475,7 +2475,7 @@ static void atl1e_remove(struct pci_dev *pdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t
+-atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++atl1e_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct atl1e_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
+index b047fd6..d115fcb 100644
+--- a/drivers/net/ethernet/aurora/nb8800.c
++++ b/drivers/net/ethernet/aurora/nb8800.c
+@@ -396,7 +396,7 @@ static void nb8800_tx_dma_start_irq(struct net_device *dev)
+       spin_unlock(&priv->tx_lock);
+ }
+-static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_tx_desc *txd;
+diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+index 6c8bc5f..58c4f8c 100644
+--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
++++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+@@ -571,7 +571,7 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
+ /*
+  * tx request callback
+  */
+-static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct bcm_enet_priv *priv;
+       struct bcm_enet_desc *desc;
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 505ceaf..c88cfa9 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -8703,7 +8703,7 @@ static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+  * this device has been detected.
+  */
+ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
+-                                             pci_channel_state_t state)
++                                             enum pci_channel_state state)
+ {
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2 *bp = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index 0e68fad..3546d87 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -1124,7 +1124,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+ {
+       /* RX_MODE controlling object */
+-      bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
++      bnx2x_init_rx_mode_obj(bp);
+       /* multicast configuration controlling object */
+       bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+index 1fb8010..0a8dc20 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -5621,7 +5621,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
+       return 0;
+ }
+-static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
++static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+                                     struct link_params *params,
+                                     struct link_vars *vars)
+ {
+@@ -5695,7 +5695,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+       return rc;
+ }
+-static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
++static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+                                    struct link_params *params,
+                                    struct link_vars *vars)
+ {
+@@ -7436,7 +7436,7 @@ static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+       }
+ }
+-static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
++static void bnx2x_8073_config_init(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -7499,7 +7499,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+       if (params->loopback_mode == LOOPBACK_EXT) {
+               bnx2x_807x_force_10G(bp, phy);
+               DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+-              return 0;
++              return;
+       } else {
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+@@ -7581,7 +7581,6 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+       DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+                  ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+-      return 0;
+ }
+ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+@@ -7748,7 +7747,7 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+ /******************************************************************/
+ /*                    BCM8705 PHY SECTION                       */
+ /******************************************************************/
+-static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
++static void bnx2x_8705_config_init(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -7772,7 +7771,6 @@ static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+                        MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+       /* BCM8705 doesn't have microcode, hence the 0 */
+       bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+-      return 0;
+ }
+ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+@@ -8959,7 +8957,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+ /******************************************************************/
+ /*                    BCM8706 PHY SECTION                       */
+ /******************************************************************/
+-static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
++static void bnx2x_8706_config_init(struct bnx2x_phy *phy,
+                                struct link_params *params,
+                                struct link_vars *vars)
+ {
+@@ -9061,11 +9059,9 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+               bnx2x_cl45_write(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+       }
+-
+-      return 0;
+ }
+-static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
++static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -9142,7 +9138,7 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+ }
+-static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
++static void bnx2x_8726_config_init(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -9223,8 +9219,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+                                phy->tx_preemphasis[1]);
+       }
+-      return 0;
+-
+ }
+ static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+@@ -9360,7 +9354,7 @@ static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+       }
+ }
+-static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
++static void bnx2x_8727_config_init(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -9442,8 +9436,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+                                (tmp2 & 0x7fff));
+       }
+-
+-      return 0;
+ }
+ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+@@ -10018,7 +10010,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+       return 0;
+ }
+-static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
++static void bnx2x_8481_config_init(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -10032,7 +10024,7 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+       bnx2x_wait_reset_complete(bp, phy, params);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+-      return bnx2x_848xx_cmn_config_init(phy, params, vars);
++      bnx2x_848xx_cmn_config_init(phy, params, vars);
+ }
+ #define PHY848xx_CMDHDLR_WAIT 300
+@@ -10282,7 +10274,7 @@ static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+       return reset_gpios;
+ }
+-static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
++static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+                               struct link_params *params)
+ {
+       struct bnx2x *bp = params->bp;
+@@ -10311,8 +10303,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+       udelay(10);
+       DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+               reset_gpios);
+-
+-      return 0;
+ }
+ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+@@ -10355,7 +10345,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ }
+ #define PHY84833_CONSTANT_LATENCY 1193
+-static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
++static void bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+                                  struct link_params *params,
+                                  struct link_vars *vars)
+ {
+@@ -10502,7 +10492,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+               if (rc) {
+                       DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+                       bnx2x_8483x_disable_eee(phy, params, vars);
+-                      return rc;
++                      return;
+               }
+               if ((phy->req_duplex == DUPLEX_FULL) &&
+@@ -10514,7 +10504,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+                       rc = bnx2x_8483x_disable_eee(phy, params, vars);
+               if (rc) {
+                       DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
+-                      return rc;
++                      return;
+               }
+       } else {
+               vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+@@ -10553,7 +10543,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+                                         MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+                                         (u16)~MDIO_84833_SUPER_ISOLATE);
+       }
+-      return rc;
+ }
+ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+@@ -11113,7 +11102,7 @@ static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+       }
+ }
+-static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
++static void bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+                                              struct link_params *params,
+                                              struct link_vars *vars)
+ {
+@@ -11315,8 +11304,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+       bnx2x_cl22_write(bp, phy,
+                       MDIO_PMA_REG_CTRL, autoneg_val);
+-
+-      return 0;
+ }
+@@ -11540,7 +11527,7 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+                        MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+ }
+-static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
++static void bnx2x_7101_config_init(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+ {
+@@ -11577,7 +11564,6 @@ static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+       bnx2x_save_spirom_version(bp, params->port,
+                                 (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+-      return 0;
+ }
+ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+@@ -11746,9 +11732,9 @@ static const struct bnx2x_phy phy_serdes = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_xgxs_config_init,
+-      .read_status    = (read_status_t)bnx2x_link_settings_status,
+-      .link_reset     = (link_reset_t)bnx2x_int_link_reset,
++      .config_init    = bnx2x_xgxs_config_init,
++      .read_status    = bnx2x_link_settings_status,
++      .link_reset     = bnx2x_int_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+       .format_fw_ver  = (format_fw_ver_t)NULL,
+       .hw_reset       = (hw_reset_t)NULL,
+@@ -11782,14 +11768,14 @@ static const struct bnx2x_phy phy_xgxs = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_xgxs_config_init,
+-      .read_status    = (read_status_t)bnx2x_link_settings_status,
+-      .link_reset     = (link_reset_t)bnx2x_int_link_reset,
+-      .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
++      .config_init    = bnx2x_xgxs_config_init,
++      .read_status    = bnx2x_link_settings_status,
++      .link_reset     = bnx2x_int_link_reset,
++      .config_loopback = bnx2x_set_xgxs_loopback,
+       .format_fw_ver  = (format_fw_ver_t)NULL,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)NULL,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
++      .phy_specific_func = bnx2x_xgxs_specific_func
+ };
+ static const struct bnx2x_phy phy_warpcore = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+@@ -11820,12 +11806,12 @@ static const struct bnx2x_phy phy_warpcore = {
+       .speed_cap_mask = 0,
+       /* req_duplex = */0,
+       /* rsrv = */0,
+-      .config_init    = (config_init_t)bnx2x_warpcore_config_init,
+-      .read_status    = (read_status_t)bnx2x_warpcore_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_warpcore_link_reset,
+-      .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
++      .config_init    = bnx2x_warpcore_config_init,
++      .read_status    = bnx2x_warpcore_read_status,
++      .link_reset     = bnx2x_warpcore_link_reset,
++      .config_loopback = bnx2x_set_warpcore_loopback,
+       .format_fw_ver  = (format_fw_ver_t)NULL,
+-      .hw_reset       = (hw_reset_t)bnx2x_warpcore_hw_reset,
++      .hw_reset       = bnx2x_warpcore_hw_reset,
+       .set_link_led   = (set_link_led_t)NULL,
+       .phy_specific_func = (phy_specific_func_t)NULL
+ };
+@@ -11851,13 +11837,13 @@ static const struct bnx2x_phy phy_7101 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_7101_config_init,
+-      .read_status    = (read_status_t)bnx2x_7101_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_common_ext_link_reset,
+-      .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_7101_format_ver,
+-      .hw_reset       = (hw_reset_t)bnx2x_7101_hw_reset,
+-      .set_link_led   = (set_link_led_t)bnx2x_7101_set_link_led,
++      .config_init    = bnx2x_7101_config_init,
++      .read_status    = bnx2x_7101_read_status,
++      .link_reset     = bnx2x_common_ext_link_reset,
++      .config_loopback = bnx2x_7101_config_loopback,
++      .format_fw_ver  = bnx2x_7101_format_ver,
++      .hw_reset       = bnx2x_7101_hw_reset,
++      .set_link_led   = bnx2x_7101_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)NULL
+ };
+ static const struct bnx2x_phy phy_8073 = {
+@@ -11882,14 +11868,14 @@ static const struct bnx2x_phy phy_8073 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_8073_config_init,
+-      .read_status    = (read_status_t)bnx2x_8073_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_8073_link_reset,
++      .config_init    = bnx2x_8073_config_init,
++      .read_status    = bnx2x_8073_read_status,
++      .link_reset     = bnx2x_8073_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
++      .format_fw_ver  = bnx2x_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)NULL,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
++      .phy_specific_func = bnx2x_8073_specific_func
+ };
+ static const struct bnx2x_phy phy_8705 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+@@ -11910,11 +11896,11 @@ static const struct bnx2x_phy phy_8705 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_8705_config_init,
+-      .read_status    = (read_status_t)bnx2x_8705_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_common_ext_link_reset,
++      .config_init    = bnx2x_8705_config_init,
++      .read_status    = bnx2x_8705_read_status,
++      .link_reset     = bnx2x_common_ext_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_null_format_ver,
++      .format_fw_ver  = bnx2x_null_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)NULL,
+       .phy_specific_func = (phy_specific_func_t)NULL
+@@ -11939,11 +11925,11 @@ static const struct bnx2x_phy phy_8706 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_8706_config_init,
+-      .read_status    = (read_status_t)bnx2x_8706_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_common_ext_link_reset,
++      .config_init    = bnx2x_8706_config_init,
++      .read_status    = bnx2x_8706_read_status,
++      .link_reset     = bnx2x_common_ext_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
++      .format_fw_ver  = bnx2x_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)NULL,
+       .phy_specific_func = (phy_specific_func_t)NULL
+@@ -11971,11 +11957,11 @@ static const struct bnx2x_phy phy_8726 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_8726_config_init,
+-      .read_status    = (read_status_t)bnx2x_8726_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_8726_link_reset,
+-      .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
++      .config_init    = bnx2x_8726_config_init,
++      .read_status    = bnx2x_8726_read_status,
++      .link_reset     = bnx2x_8726_link_reset,
++      .config_loopback = bnx2x_8726_config_loopback,
++      .format_fw_ver  = bnx2x_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)NULL,
+       .phy_specific_func = (phy_specific_func_t)NULL
+@@ -12002,14 +11988,14 @@ static const struct bnx2x_phy phy_8727 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_8727_config_init,
+-      .read_status    = (read_status_t)bnx2x_8727_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_8727_link_reset,
++      .config_init    = bnx2x_8727_config_init,
++      .read_status    = bnx2x_8727_read_status,
++      .link_reset     = bnx2x_8727_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
+-      .hw_reset       = (hw_reset_t)bnx2x_8727_hw_reset,
+-      .set_link_led   = (set_link_led_t)bnx2x_8727_set_link_led,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
++      .format_fw_ver  = bnx2x_format_ver,
++      .hw_reset       = bnx2x_8727_hw_reset,
++      .set_link_led   = bnx2x_8727_set_link_led,
++      .phy_specific_func = bnx2x_8727_specific_func
+ };
+ static const struct bnx2x_phy phy_8481 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+@@ -12037,13 +12023,13 @@ static const struct bnx2x_phy phy_8481 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_8481_config_init,
+-      .read_status    = (read_status_t)bnx2x_848xx_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_8481_link_reset,
++      .config_init    = bnx2x_8481_config_init,
++      .read_status    = bnx2x_848xx_read_status,
++      .link_reset     = bnx2x_8481_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+-      .hw_reset       = (hw_reset_t)bnx2x_8481_hw_reset,
+-      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
++      .format_fw_ver  = bnx2x_848xx_format_ver,
++      .hw_reset       = bnx2x_8481_hw_reset,
++      .set_link_led   = bnx2x_848xx_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)NULL
+ };
+@@ -12074,14 +12060,14 @@ static const struct bnx2x_phy phy_84823 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_848x3_config_init,
+-      .read_status    = (read_status_t)bnx2x_848xx_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
++      .config_init    = bnx2x_848x3_config_init,
++      .read_status    = bnx2x_848xx_read_status,
++      .link_reset     = bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
++      .format_fw_ver  = bnx2x_848xx_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+-      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
++      .set_link_led   = bnx2x_848xx_set_link_led,
++      .phy_specific_func = bnx2x_848xx_specific_func
+ };
+ static const struct bnx2x_phy phy_84833 = {
+@@ -12109,14 +12095,14 @@ static const struct bnx2x_phy phy_84833 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_848x3_config_init,
+-      .read_status    = (read_status_t)bnx2x_848xx_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
++      .config_init    = bnx2x_848x3_config_init,
++      .read_status    = bnx2x_848xx_read_status,
++      .link_reset     = bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+-      .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+-      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
++      .format_fw_ver  = bnx2x_848xx_format_ver,
++      .hw_reset       = bnx2x_84833_hw_reset_phy,
++      .set_link_led   = bnx2x_848xx_set_link_led,
++      .phy_specific_func = bnx2x_848xx_specific_func
+ };
+ static const struct bnx2x_phy phy_84834 = {
+@@ -12143,14 +12129,14 @@ static const struct bnx2x_phy phy_84834 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_848x3_config_init,
+-      .read_status    = (read_status_t)bnx2x_848xx_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
++      .config_init    = bnx2x_848x3_config_init,
++      .read_status    = bnx2x_848xx_read_status,
++      .link_reset     = bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+-      .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+-      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
++      .format_fw_ver  = bnx2x_848xx_format_ver,
++      .hw_reset       = bnx2x_84833_hw_reset_phy,
++      .set_link_led   = bnx2x_848xx_set_link_led,
++      .phy_specific_func = bnx2x_848xx_specific_func
+ };
+ static const struct bnx2x_phy phy_84858 = {
+@@ -12177,14 +12163,14 @@ static const struct bnx2x_phy phy_84858 = {
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+-      .config_init    = (config_init_t)bnx2x_848x3_config_init,
+-      .read_status    = (read_status_t)bnx2x_848xx_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
++      .config_init    = bnx2x_848x3_config_init,
++      .read_status    = bnx2x_848xx_read_status,
++      .link_reset     = bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+-      .format_fw_ver  = (format_fw_ver_t)bnx2x_8485x_format_ver,
+-      .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+-      .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
++      .format_fw_ver  = bnx2x_8485x_format_ver,
++      .hw_reset       = bnx2x_84833_hw_reset_phy,
++      .set_link_led   = bnx2x_848xx_set_link_led,
++      .phy_specific_func = bnx2x_848xx_specific_func
+ };
+ static const struct bnx2x_phy phy_54618se = {
+@@ -12211,14 +12197,14 @@ static const struct bnx2x_phy phy_54618se = {
+       .speed_cap_mask = 0,
+       /* req_duplex = */0,
+       /* rsrv = */0,
+-      .config_init    = (config_init_t)bnx2x_54618se_config_init,
+-      .read_status    = (read_status_t)bnx2x_54618se_read_status,
+-      .link_reset     = (link_reset_t)bnx2x_54618se_link_reset,
+-      .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
++      .config_init    = bnx2x_54618se_config_init,
++      .read_status    = bnx2x_54618se_read_status,
++      .link_reset     = bnx2x_54618se_link_reset,
++      .config_loopback = bnx2x_54618se_config_loopback,
+       .format_fw_ver  = (format_fw_ver_t)NULL,
+       .hw_reset       = (hw_reset_t)NULL,
+-      .set_link_led   = (set_link_led_t)bnx2x_5461x_set_link_led,
+-      .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
++      .set_link_led   = bnx2x_5461x_set_link_led,
++      .phy_specific_func = bnx2x_54618se_specific_func
+ };
+ /*****************************************************************/
+ /*                                                               */
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+index b7d2511..a625bae 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+@@ -126,7 +126,7 @@ struct link_vars;
+ struct link_params;
+ struct bnx2x_phy;
+-typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
++typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+                           struct link_vars *vars);
+ typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+                           struct link_vars *vars);
+@@ -134,7 +134,7 @@ typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+                            struct link_params *params);
+ typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+                                 struct link_params *params);
+-typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
++typedef int (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+ typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+                              struct link_params *params, u8 mode);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index fa3386b..ea5074c 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -14159,7 +14159,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+  * this device has been detected.
+  */
+ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2x *bp = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+index ff702a7..cb3ae16 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+@@ -2576,15 +2576,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
+       return rc;
+ }
+-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+-                          struct bnx2x_rx_mode_obj *o)
++void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
+ {
+       if (CHIP_IS_E1x(bp)) {
+-              o->wait_comp      = bnx2x_empty_rx_mode_wait;
+-              o->config_rx_mode = bnx2x_set_rx_mode_e1x;
++              bp->rx_mode_obj.wait_comp      = bnx2x_empty_rx_mode_wait;
++              bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
+       } else {
+-              o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+-              o->config_rx_mode = bnx2x_set_rx_mode_e2;
++              bp->rx_mode_obj.wait_comp      = bnx2x_wait_rx_mode_comp_e2;
++              bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
+       }
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+index 4048fc5..333809f 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+@@ -1436,8 +1436,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
+ /********************* RX MODE ****************/
+-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+-                          struct bnx2x_rx_mode_obj *o);
++void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
+ /**
+  * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 228c964..7bbb29da 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6915,7 +6915,7 @@ init_err_free:
+  * this device has been detected.
+  */
+ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+-                                             pci_channel_state_t state)
++                                             enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index ea967df..bf073dc 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18112,7 +18112,7 @@ static void tg3_shutdown(struct pci_dev *pdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+-                                            pci_channel_state_t state)
++                                            enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct tg3 *tp = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 3b5e98e..52b3916 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -150,6 +150,7 @@
+ #define  CHIPREV_ID_5750_A0            0x4000
+ #define  CHIPREV_ID_5750_A1            0x4001
+ #define  CHIPREV_ID_5750_A3            0x4003
++#define  CHIPREV_ID_5750_C1            0x4201
+ #define  CHIPREV_ID_5750_C2            0x4202
+ #define  CHIPREV_ID_5752_A0_HW                 0x5000
+ #define  CHIPREV_ID_5752_A0            0x6000
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
+index 1d11d66..8f7a3cb 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
++++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
+@@ -34,10 +34,19 @@ struct bfa_sm_table {
+       int             state;  /*!< state machine encoding     */
+       char            *name;  /*!< state name for display     */
+ };
+-#define BFA_SM(_sm)           ((bfa_sm_t)(_sm))
++#define BFA_SM(_sm)           (_sm)
++
++#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (_state))
++#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (_state))
+ /* State machine with entry actions. */
+-typedef void (*bfa_fsm_t)(void *fsm, int event);
++struct bfa_ioc;
++enum ioc_event;
++struct bfa_iocpf;
++enum iocpf_event;
++
++typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc *fsm, enum ioc_event event);
++typedef void (*bfa_fsm_iocpf_t)(struct bfa_iocpf *fsm, enum iocpf_event event);
+ /* oc - object class eg. bfa_ioc
+  * st - state, eg. reset
+@@ -49,16 +58,37 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
+       static void oc ## _sm_ ## st ## _entry(otype * fsm)
+ #define bfa_fsm_set_state(_fsm, _state) do {                          \
+-      (_fsm)->fsm = (bfa_fsm_t)(_state);                              \
++      (_fsm)->fsm = (_state);                                         \
+       _state ## _entry(_fsm);                                         \
+ } while (0)
+ #define bfa_fsm_send_event(_fsm, _event)      ((_fsm)->fsm((_fsm), (_event)))
+-#define bfa_fsm_cmp_state(_fsm, _state)                                       \
+-      ((_fsm)->fsm == (bfa_fsm_t)(_state))
++#define bfa_fsm_cmp_state(_fsm, _state)               ((_fsm)->fsm == (_state))
++
++/* For converting from state machine function to state encoding. */
++struct iocpf_sm_table {
++      bfa_fsm_iocpf_t sm;     /*!< state machine function     */
++      int             state;  /*!< state machine encoding     */
++      char            *name;  /*!< state name for display     */
++};
++struct ioc_sm_table {
++      bfa_fsm_ioc_t   sm;     /*!< state machine function     */
++      int             state;  /*!< state machine encoding     */
++      char            *name;  /*!< state name for display     */
++};
++
++static inline int
++iocpf_sm_to_state(const struct iocpf_sm_table *smt, bfa_fsm_iocpf_t sm)
++{
++      int     i = 0;
++
++      while (smt[i].sm && smt[i].sm != sm)
++              i++;
++      return smt[i].state;
++}
+ static inline int
+-bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
++ioc_sm_to_state(const struct ioc_sm_table *smt, bfa_fsm_ioc_t sm)
+ {
+       int     i = 0;
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+index 9e59663..3564807 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
++++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+@@ -122,7 +122,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
+-static struct bfa_sm_table ioc_sm_table[] = {
++static struct ioc_sm_table ioc_sm_table[] = {
+       {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
+       {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+       {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
+@@ -191,7 +191,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
+                                               enum iocpf_event);
+ bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
+-static struct bfa_sm_table iocpf_sm_table[] = {
++static struct iocpf_sm_table iocpf_sm_table[] = {
+       {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+       {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+       {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+@@ -2862,12 +2862,12 @@ static enum bfa_ioc_state
+ bfa_ioc_get_state(struct bfa_ioc *ioc)
+ {
+       enum bfa_iocpf_state iocpf_st;
+-      enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
++      enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm);
+       if (ioc_st == BFA_IOC_ENABLING ||
+               ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+-              iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
++              iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+               switch (iocpf_st) {
+               case BFA_IOCPF_SEMWAIT:
+@@ -2985,7 +2985,7 @@ bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
+ {
+       enum bfa_iocpf_state iocpf_st;
+-      iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
++      iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+       if (iocpf_st == BFA_IOCPF_HWINIT)
+               bfa_ioc_poll_fwinit(ioc);
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+index 2c0b4c0..97873eb 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
++++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+@@ -156,7 +156,7 @@ struct bfa_ioc_notify {
+ } while (0)
+ struct bfa_iocpf {
+-      bfa_fsm_t               fsm;
++      bfa_fsm_iocpf_t         fsm;
+       struct bfa_ioc          *ioc;
+       bool                    fw_mismatch_notified;
+       bool                    auto_recover;
+@@ -164,7 +164,7 @@ struct bfa_iocpf {
+ };
+ struct bfa_ioc {
+-      bfa_fsm_t               fsm;
++      bfa_fsm_ioc_t           fsm;
+       struct bfa              *bfa;
+       struct bfa_pcidev       pcidev;
+       struct timer_list       ioc_timer;
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
+index 66bc8b5..bf64466 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h
++++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
+@@ -63,8 +63,10 @@ enum bfa_msgq_cmdq_flags {
+       BFA_MSGQ_CMDQ_F_DB_UPDATE       = 1,
+ };
++enum cmdq_event;
++
+ struct bfa_msgq_cmdq {
+-      bfa_fsm_t                       fsm;
++      void (*fsm)(struct bfa_msgq_cmdq *, enum cmdq_event);
+       enum bfa_msgq_cmdq_flags flags;
+       u16                     producer_index;
+@@ -89,8 +91,10 @@ enum bfa_msgq_rspq_flags {
+ typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);
++enum rspq_event;
++
+ struct bfa_msgq_rspq {
+-      bfa_fsm_t                       fsm;
++      void (*fsm)(struct bfa_msgq_rspq *, enum rspq_event);
+       enum bfa_msgq_rspq_flags flags;
+       u16                     producer_index;
+diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
+index 4e5c387..0791dab 100644
+--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
++++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
+@@ -1265,7 +1265,7 @@ bna_enet_mtu_get(struct bna_enet *enet)
+ void
+ bna_enet_enable(struct bna_enet *enet)
+ {
+-      if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
++      if (enet->fsm != bna_enet_sm_stopped)
+               return;
+       enet->flags |= BNA_ENET_F_ENABLED;
+@@ -1676,10 +1676,10 @@ bna_cb_ioceth_reset(void *arg)
+ }
+ static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
+-      bna_cb_ioceth_enable,
+-      bna_cb_ioceth_disable,
+-      bna_cb_ioceth_hbfail,
+-      bna_cb_ioceth_reset
++      .enable_cbfn = bna_cb_ioceth_enable,
++      .disable_cbfn = bna_cb_ioceth_disable,
++      .hbfail_cbfn = bna_cb_ioceth_hbfail,
++      .reset_cbfn = bna_cb_ioceth_reset
+ };
+ static void bna_attr_init(struct bna_ioceth *ioceth)
+@@ -1759,12 +1759,12 @@ bna_ioceth_uninit(struct bna_ioceth *ioceth)
+ void
+ bna_ioceth_enable(struct bna_ioceth *ioceth)
+ {
+-      if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
++      if (ioceth->fsm == bna_ioceth_sm_ready) {
+               bnad_cb_ioceth_ready(ioceth->bna->bnad);
+               return;
+       }
+-      if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
++      if (ioceth->fsm == bna_ioceth_sm_stopped)
+               bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
+ }
+diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+index 95bc470..c12be9f 100644
+--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
++++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+@@ -1964,7 +1964,7 @@ static void
+ bna_rx_stop(struct bna_rx *rx)
+ {
+       rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
+-      if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
++      if (rx->fsm == bna_rx_sm_stopped)
+               bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
+       else {
+               rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
+@@ -2543,7 +2543,7 @@ bna_rx_destroy(struct bna_rx *rx)
+ void
+ bna_rx_enable(struct bna_rx *rx)
+ {
+-      if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
++      if (rx->fsm != bna_rx_sm_stopped)
+               return;
+       rx->rx_flags |= BNA_RX_F_ENABLED;
+@@ -3531,7 +3531,7 @@ bna_tx_destroy(struct bna_tx *tx)
+ void
+ bna_tx_enable(struct bna_tx *tx)
+ {
+-      if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
++      if (tx->fsm != bna_tx_sm_stopped)
+               return;
+       tx->flags |= BNA_TX_F_ENABLED;
+diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
+index c438d03..4653f43 100644
+--- a/drivers/net/ethernet/brocade/bna/bna_types.h
++++ b/drivers/net/ethernet/brocade/bna/bna_types.h
+@@ -320,8 +320,10 @@ struct bna_attr {
+ /* IOCEth */
++enum bna_ioceth_event;
++
+ struct bna_ioceth {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bna_ioceth *, enum bna_ioceth_event);
+       struct bfa_ioc ioc;
+       struct bna_attr attr;
+@@ -342,8 +344,10 @@ struct bna_pause_config {
+       enum bna_status rx_pause;
+ };
++enum bna_enet_event;
++
+ struct bna_enet {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bna_enet *, enum bna_enet_event);
+       enum bna_enet_flags flags;
+       enum bna_enet_type type;
+@@ -368,8 +372,10 @@ struct bna_enet {
+ /* Ethport */
++enum bna_ethport_event;
++
+ struct bna_ethport {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bna_ethport *, enum bna_ethport_event);
+       enum bna_ethport_flags flags;
+       enum bna_link_status link_status;
+@@ -462,13 +468,15 @@ struct bna_txq {
+ };
+ /* Tx object */
++enum bna_tx_event;
++
+ struct bna_tx {
+       /* This should be the first one */
+       struct list_head                        qe;
+       int                     rid;
+       int                     hw_id;
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bna_tx *, enum bna_tx_event);
+       enum bna_tx_flags flags;
+       enum bna_tx_type type;
+@@ -706,8 +714,10 @@ struct bna_rxp {
+ };
+ /* RxF structure (hardware Rx Function) */
++enum bna_rxf_event;
++
+ struct bna_rxf {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bna_rxf *, enum bna_rxf_event);
+       struct bfa_msgq_cmd_entry msgq_cmd;
+       union {
+@@ -777,13 +787,15 @@ struct bna_rxf {
+ };
+ /* Rx object */
++enum bna_rx_event;
++
+ struct bna_rx {
+       /* This should be the first one */
+       struct list_head                        qe;
+       int                     rid;
+       int                     hw_id;
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bna_rx *, enum bna_rx_event);
+       enum bna_rx_type type;
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index 771cc26..c681a90 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -1118,8 +1118,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
+  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
+  */
+ static void
+-bnad_tx_cleanup(struct delayed_work *work)
++bnad_tx_cleanup(struct work_struct *_work)
+ {
++      struct delayed_work *work = (struct delayed_work *)_work;
+       struct bnad_tx_info *tx_info =
+               container_of(work, struct bnad_tx_info, tx_cleanup_work);
+       struct bnad *bnad = NULL;
+@@ -1197,7 +1198,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
+  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
+  */
+ static void
+-bnad_rx_cleanup(void *work)
++bnad_rx_cleanup(struct work_struct *work)
+ {
+       struct bnad_rx_info *rx_info =
+               container_of(work, struct bnad_rx_info, rx_cleanup_work);
+@@ -2021,8 +2022,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
+       }
+       tx_info->tx = tx;
+-      INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
+-                      (work_func_t)bnad_tx_cleanup);
++      INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
+       /* Register ISR for the Tx object */
+       if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+@@ -2278,8 +2278,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
+       rx_info->rx = rx;
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-      INIT_WORK(&rx_info->rx_cleanup_work,
+-                      (work_func_t)(bnad_rx_cleanup));
++      INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
+       /*
+        * Init NAPI, so that state is set to NAPI_STATE_SCHED,
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index d954a97..2a1c33a 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -1341,7 +1341,7 @@ static inline int macb_clear_csum(struct sk_buff *skb)
+       return 0;
+ }
+-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       u16 queue_index = skb_get_queue_mapping(skb);
+       struct macb *bp = netdev_priv(dev);
+@@ -2612,7 +2612,7 @@ static int at91ether_close(struct net_device *dev)
+ }
+ /* Transmit packet */
+-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct macb *lp = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
+index 20d6942..30f86d5 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
+@@ -468,7 +468,7 @@ static void stop_pci_io(struct octeon_device *oct)
+  * this device has been detected.
+  */
+ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+-                                                   pci_channel_state_t state)
++                                                   enum pci_channel_state state)
+ {
+       struct octeon_device *oct = pci_get_drvdata(pdev);
+@@ -2869,7 +2869,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+  * @returns whether the packet was transmitted to the device okay or not
+  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
+  */
+-static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct lio *lio;
+       struct octnet_buf_free_info *finfo;
+@@ -3371,7 +3371,7 @@ static void liquidio_del_vxlan_port(struct net_device *netdev,
+                                   OCTNET_CMD_VXLAN_PORT_DEL);
+ }
+-static struct net_device_ops lionetdevops = {
++static net_device_ops_no_const lionetdevops __read_only = {
+       .ndo_open               = liquidio_open,
+       .ndo_stop               = liquidio_stop,
+       .ndo_start_xmit         = liquidio_xmit,
+@@ -3599,8 +3599,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
+               SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
+-              if (num_iqueues > 1)
++              if (num_iqueues > 1) {
++                      pax_open_kernel();
+                       lionetdevops.ndo_select_queue = select_q;
++                      pax_close_kernel();
++              }
+               /* Associate the routines that will handle different
+                * netdev tasks.
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 43da891..8fbfb54 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -2997,7 +2997,7 @@ void t3_fatal_err(struct adapter *adapter)
+  * this device has been detected.
+  */
+ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
+-                                           pci_channel_state_t state)
++                                           enum pci_channel_state state)
+ {
+       struct adapter *adapter = pci_get_drvdata(pdev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+index 8cffcdf..aadf043 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
+  */
+ struct l2t_skb_cb {
+       arp_failure_handler_func arp_failure_handler;
+-};
++} __no_const;
+ #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 3ceafb55..c62b970 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -4194,7 +4194,7 @@ bye:
+ /* EEH callbacks */
+ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+-                                       pci_channel_state_t state)
++                                       enum pci_channel_state state)
+ {
+       int i;
+       struct adapter *adap = pci_get_drvdata(pdev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+index 109bc63..646ff4d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+@@ -558,7 +558,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
+                          unsigned int);
+ void t4vf_free_sge_resources(struct adapter *);
+-int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
++netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+ int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
+                      const struct pkt_gl *);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+index c8fd4f8..af708fc 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+@@ -1159,7 +1159,7 @@ static inline void txq_advance(struct sge_txq *tq, unsigned int n)
+  *
+  *    Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
+  */
+-int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
++netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       u32 wr_mid;
+       u64 cntrl, *end;
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index f45385f..24f6c11e 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1021,7 +1021,7 @@ static void dm9000_send_packet(struct net_device *dev,
+  *  Hardware start transmission.
+  *  Send a packet to media from the upper layer.
+  */
+-static int
++static netdev_tx_t
+ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       unsigned long flags;
+diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
+index f0e9e2e..442241e 100644
+--- a/drivers/net/ethernet/dec/tulip/de4x5.c
++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
+@@ -912,7 +912,7 @@ static int     de4x5_init(struct net_device *dev);
+ static int     de4x5_sw_reset(struct net_device *dev);
+ static int     de4x5_rx(struct net_device *dev);
+ static int     de4x5_tx(struct net_device *dev);
+-static void    de4x5_ast(struct net_device *dev);
++static void    de4x5_ast(unsigned long _dev);
+ static int     de4x5_txur(struct net_device *dev);
+ static int     de4x5_rx_ovfc(struct net_device *dev);
+@@ -1149,7 +1149,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
+       lp->gendev = gendev;
+       spin_lock_init(&lp->lock);
+       init_timer(&lp->timer);
+-      lp->timer.function = (void (*)(unsigned long))de4x5_ast;
++      lp->timer.function = de4x5_ast;
+       lp->timer.data = (unsigned long)dev;
+       de4x5_parse_params(dev);
+@@ -1743,8 +1743,9 @@ de4x5_tx(struct net_device *dev)
+ }
+ static void
+-de4x5_ast(struct net_device *dev)
++de4x5_ast(unsigned long _dev)
+ {
++      struct net_device *dev = (struct net_device *)_dev;
+       struct de4x5_private *lp = netdev_priv(dev);
+       int next_tick = DE4X5_AUTOSENSE_MS;
+       int dt;
+@@ -2371,7 +2372,7 @@ autoconf_media(struct net_device *dev)
+       lp->media = INIT;
+       lp->tcount = 0;
+-      de4x5_ast(dev);
++      de4x5_ast((unsigned long)dev);
+       return lp->media;
+ }
+@@ -5376,7 +5377,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+       for (i=0; i<ETH_ALEN; i++) {
+           tmp.addr[i] = dev->dev_addr[i];
+       }
+-      if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
++      if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+       break;
+     case DE4X5_SET_HWADDR:           /* Set the hardware address */
+@@ -5416,7 +5417,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+       spin_lock_irqsave(&lp->lock, flags);
+       memcpy(&statbuf, &lp->pktStats, ioc->len);
+       spin_unlock_irqrestore(&lp->lock, flags);
+-      if (copy_to_user(ioc->data, &statbuf, ioc->len))
++      if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
+               return -EFAULT;
+       break;
+     }
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 874c753..e161da1 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -556,7 +556,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
+       if (wrapped)
+               newacc += 65536;
+-      ACCESS_ONCE(*acc) = newacc;
++      ACCESS_ONCE_RW(*acc) = newacc;
+ }
+ static void populate_erx_stats(struct be_adapter *adapter,
+@@ -5544,7 +5544,7 @@ static void be_shutdown(struct pci_dev *pdev)
+ }
+ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+-                                          pci_channel_state_t state)
++                                          enum pci_channel_state state)
+ {
+       struct be_adapter *adapter = pci_get_drvdata(pdev);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 36361f8..b3b5f9f 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -26,6 +26,7 @@
+ #include <linux/ethtool.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/irqreturn.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/phy.h>
+@@ -1174,7 +1175,7 @@ static int ftgmac100_stop(struct net_device *netdev)
+       return 0;
+ }
+-static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
++static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+                                    struct net_device *netdev)
+ {
+       struct ftgmac100 *priv = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
+index dce5f7b..222e709 100644
+--- a/drivers/net/ethernet/faraday/ftmac100.c
++++ b/drivers/net/ethernet/faraday/ftmac100.c
+@@ -31,6 +31,8 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+ #include "ftmac100.h"
+@@ -1009,7 +1011,7 @@ static int ftmac100_stop(struct net_device *netdev)
+       return 0;
+ }
+-static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct ftmac100 *priv = netdev_priv(netdev);
+       dma_addr_t map;
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+index 446ae9d..79d1d75 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+@@ -305,7 +305,7 @@ static int mpc52xx_fec_close(struct net_device *dev)
+  * invariant will hold if you make sure that the netif_*_queue()
+  * calls are done at the proper times.
+  */
+-static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+       struct bcom_fec_bd *bd;
+diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+index 61fd486..06047eb 100644
+--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+@@ -509,7 +509,7 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
+ }
+ #endif
+-static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct fs_enet_private *fep = netdev_priv(dev);
+       cbd_t __iomem *bdp;
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 4b4f5bc..23b3b00 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -112,7 +112,7 @@
+ const char gfar_driver_version[] = "2.0";
+ static int gfar_enet_open(struct net_device *dev);
+-static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ static void gfar_reset_task(struct work_struct *work);
+ static void gfar_timeout(struct net_device *dev);
+ static int gfar_close(struct net_device *dev);
+@@ -2316,7 +2316,7 @@ static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+ /* This is called by the kernel when a frame is ready for transmission.
+  * It is pointed to by the dev->hard_start_xmit function pointer
+  */
+-static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_priv_tx_q *tx_queue = NULL;
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
+index 5bf1ade..4e74666 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -3085,7 +3085,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
+ /* This is called by the kernel when a frame is ready for transmission. */
+ /* It is pointed to by the dev->hard_start_xmit function pointer */
+-static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ucc_geth_private *ugeth = netdev_priv(dev);
+ #ifdef CONFIG_UGETH_TX_ON_DEMAND
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index 0c4afe9..d888314 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -422,7 +422,7 @@ static void hip04_start_tx_timer(struct hip04_priv *priv)
+                              ns, HRTIMER_MODE_REL);
+ }
+-static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+index 275618b..abd1703 100644
+--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
++++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+@@ -600,7 +600,7 @@ static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+-static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct hix5hd2_priv *priv = netdev_priv(dev);
+       struct hix5hd2_desc *desc;
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+index e28d960..6168a00 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+@@ -844,16 +844,18 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+       struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+       static atomic_t id = ATOMIC_INIT(-1);
++      pax_open_kernel();
+       switch (dsaf_dev->dsaf_ver) {
+       case AE_VERSION_1:
+-              hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq;
++              const_cast(hns_dsaf_ops.toggle_ring_irq) = hns_ae_toggle_ring_irq;
+               break;
+       case AE_VERSION_2:
+-              hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq;
++              const_cast(hns_dsaf_ops.toggle_ring_irq) = hns_aev2_toggle_ring_irq;
+               break;
+       default:
+               break;
+       }
++      pax_close_kernel();
+       snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
+                (int)atomic_inc_return(&id));
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+index 1daf018..2548233 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+@@ -318,7 +318,7 @@ struct dsaf_device {
+       struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+       struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+       struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
+-      struct dsaf_misc_op *misc_op;
++      const struct dsaf_misc_op *misc_op;
+       struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+       struct dsaf_int_stat int_stat;
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+index 611b67b..63ecdd4 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+@@ -522,48 +522,46 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
+ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
+ {
+-      struct dsaf_misc_op *misc_op;
+-
+-      misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
+-      if (!misc_op)
+-              return NULL;
+-
+-      if (dev_of_node(dsaf_dev->dev)) {
+-              misc_op->cpld_set_led = hns_cpld_set_led;
+-              misc_op->cpld_reset_led = cpld_led_reset;
+-              misc_op->cpld_set_led_id = cpld_set_led_id;
+-
+-              misc_op->dsaf_reset = hns_dsaf_rst;
+-              misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
+-              misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port;
+-              misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
+-              misc_op->ppe_srst = hns_ppe_srst_by_port;
+-              misc_op->ppe_comm_srst = hns_ppe_com_srst;
+-
+-              misc_op->get_phy_if = hns_mac_get_phy_if;
+-              misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+-
+-              misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
+-      } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+-              misc_op->cpld_set_led = hns_cpld_set_led;
+-              misc_op->cpld_reset_led = cpld_led_reset;
+-              misc_op->cpld_set_led_id = cpld_set_led_id;
+-
+-              misc_op->dsaf_reset = hns_dsaf_rst_acpi;
+-              misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
+-              misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi;
+-              misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
+-              misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
+-              misc_op->ppe_comm_srst = hns_ppe_com_srst;
+-
+-              misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
+-              misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+-
+-              misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
+-      } else {
+-              devm_kfree(dsaf_dev->dev, (void *)misc_op);
+-              misc_op = NULL;
+-      }
+-
+-      return (void *)misc_op;
++      static const struct dsaf_misc_op dsaf_misc_ops = {
++              .cpld_set_led = hns_cpld_set_led,
++              .cpld_reset_led = cpld_led_reset,
++              .cpld_set_led_id = cpld_set_led_id,
++
++              .dsaf_reset = hns_dsaf_rst,
++              .xge_srst = hns_dsaf_xge_srst_by_port,
++              .xge_core_srst = hns_dsaf_xge_core_srst_by_port,
++              .ge_srst = hns_dsaf_ge_srst_by_port,
++              .ppe_srst = hns_ppe_srst_by_port,
++              .ppe_comm_srst = hns_ppe_com_srst,
++
++              .get_phy_if = hns_mac_get_phy_if,
++              .get_sfp_prsnt = hns_mac_get_sfp_prsnt,
++
++              .cfg_serdes_loopback = hns_mac_config_sds_loopback,
++      };
++
++      static const struct dsaf_misc_op dsaf_misc_ops_acpi = {
++              .cpld_set_led = hns_cpld_set_led,
++              .cpld_reset_led = cpld_led_reset,
++              .cpld_set_led_id = cpld_set_led_id,
++
++              .dsaf_reset = hns_dsaf_rst_acpi,
++              .xge_srst = hns_dsaf_xge_srst_by_port_acpi,
++              .xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi,
++              .ge_srst = hns_dsaf_ge_srst_by_port_acpi,
++              .ppe_srst = hns_ppe_srst_by_port_acpi,
++              .ppe_comm_srst = hns_ppe_com_srst,
++
++              .get_phy_if = hns_mac_get_phy_if_acpi,
++              .get_sfp_prsnt = hns_mac_get_sfp_prsnt,
++
++              .cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi,
++      };
++
++      if (dev_of_node(dsaf_dev->dev))
++              return &dsaf_misc_ops;
++      else if (is_acpi_node(dsaf_dev->dev->fwnode))
++              return &dsaf_misc_ops_acpi;
++
++      return NULL;
+ }
+diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
+index 3dbc53c2..fa08fb8 100644
+--- a/drivers/net/ethernet/i825xx/lib82596.c
++++ b/drivers/net/ethernet/i825xx/lib82596.c
+@@ -347,7 +347,7 @@ static const char init_setup[] =
+       0x7f /*  *multi IA */ };
+ static int i596_open(struct net_device *dev);
+-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ static irqreturn_t i596_interrupt(int irq, void *dev_id);
+ static int i596_close(struct net_device *dev);
+ static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+@@ -965,7 +965,7 @@ static void i596_tx_timeout (struct net_device *dev)
+ }
+-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct i596_private *lp = netdev_priv(dev);
+       struct tx_cmd *tx_cmd;
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index 54efa9a..0d297bd 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -2047,7 +2047,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
+       dev_consume_skb_any(skb);
+ }
+-static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ehea_port *port = netdev_priv(dev);
+       struct ehea_swqe *swqe;
+diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
+index 7af09cb..010411a 100644
+--- a/drivers/net/ethernet/ibm/emac/core.c
++++ b/drivers/net/ethernet/ibm/emac/core.c
+@@ -1415,7 +1415,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
+ }
+ /* Tx lock BH */
+-static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct emac_instance *dev = netdev_priv(ndev);
+       unsigned int len = skb->len;
+@@ -1473,7 +1473,7 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot,
+ }
+ /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
+-static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct emac_instance *dev = netdev_priv(ndev);
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
+index 068789e..f4928f0 100644
+--- a/drivers/net/ethernet/intel/e100.c
++++ b/drivers/net/ethernet/intel/e100.c
+@@ -3106,7 +3106,7 @@ static void e100_shutdown(struct pci_dev *pdev)
+  * @pdev: Pointer to PCI device
+  * @state: The current pci connection state
+  */
+-static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct nic *nic = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index f42129d..d2e3932 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -5272,7 +5272,7 @@ static void e1000_netpoll(struct net_device *netdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                                      enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 7017281..6bbf47e 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6784,7 +6784,7 @@ static void e1000_netpoll(struct net_device *netdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+index 774a565..38b03e2 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+@@ -2249,7 +2249,7 @@ static int fm10k_suspend(struct pci_dev *pdev,
+  * this device has been detected.
+  */
+ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct fm10k_intfc *interface = pci_get_drvdata(pdev);
+       struct net_device *netdev = interface->netdev;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+index ed39cba..76569b9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+@@ -417,7 +417,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
+       wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+       /* Update the base adjustement value. */
+-      ACCESS_ONCE(pf->ptp_base_adj) = incval;
++      ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
+       smp_mb(); /* Force the above update. */
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 942a89f..5ca83a9 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -7809,7 +7809,7 @@ static void igb_netpoll(struct net_device *netdev)
+  *  this device has been detected.
+  **/
+ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+-                                            pci_channel_state_t state)
++                                            enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct igb_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index b0778ba..ed4357a 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -2511,7 +2511,7 @@ static void igbvf_netpoll(struct net_device *netdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct igbvf_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index b4f0374..e174bd7 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -9846,7 +9846,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+index e5431bf..1db690e 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+@@ -1122,7 +1122,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+       }
+       /* update the base incval used to calculate frequency adjustment */
+-      ACCESS_ONCE(adapter->base_incval) = incval;
++      ACCESS_ONCE_RW(adapter->base_incval) = incval;
+       smp_mb();
+       /* need lock to prevent incorrect read while modifying cyclecounter */
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index d9d6616..3331f28 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -3622,7 +3622,7 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
+       return __ixgbevf_maybe_stop_tx(tx_ring, size);
+ }
+-static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct ixgbevf_tx_buffer *first;
+@@ -4212,7 +4212,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
+  * this device has been detected.
+  **/
+ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+-                                                pci_channel_state_t state)
++                                                enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index d41c28d..ef80211 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2290,7 +2290,7 @@ error:
+ }
+ /* Main tx processing */
+-static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct mvneta_port *pp = netdev_priv(dev);
+       u16 txq_id = skb_get_queue_mapping(skb);
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 60227a3..160ba02 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -5236,7 +5236,7 @@ error:
+ }
+ /* Main tx processing */
+-static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_tx_queue *txq, *aggr_txq;
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index 5d5000c..7437949f 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1271,7 +1271,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+       return work_done;
+ }
+-static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index e2509bb..8357fef 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -495,8 +495,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+       wmb();
+       /* we want to dirty this cache line once */
+-      ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
+-      ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
++      ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
++      ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
+       if (ring->free_tx_desc == mlx4_en_recycle_tx_desc)
+               return done < budget;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 7183ac4..691c517 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -4061,7 +4061,7 @@ static const struct pci_device_id mlx4_pci_table[] = {
+ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
+ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
+-                                            pci_channel_state_t state)
++                                            enum pci_channel_state state)
+ {
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 2385bae..5413c50 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1412,7 +1412,7 @@ static void remove_one(struct pci_dev *pdev)
+ }
+ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+-                                            pci_channel_state_t state)
++                                            enum pci_channel_state state)
+ {
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index d48873b..426f12e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -4533,16 +4533,16 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+       return notifier_from_errno(err);
+ }
+-static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
++static struct notifier_block mlxsw_sp_netdevice_nb = {
+       .notifier_call = mlxsw_sp_netdevice_event,
+ };
+-static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
++static struct notifier_block mlxsw_sp_inetaddr_nb = {
+       .notifier_call = mlxsw_sp_inetaddr_event,
+       .priority = 10, /* Must be called before FIB notifier block */
+ };
+-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
++static struct notifier_block mlxsw_sp_router_netevent_nb = {
+       .notifier_call = mlxsw_sp_router_netevent_event,
+ };
+diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
+index 20cb85b..6135d90 100644
+--- a/drivers/net/ethernet/micrel/ks8695net.c
++++ b/drivers/net/ethernet/micrel/ks8695net.c
+@@ -1156,7 +1156,7 @@ ks8695_timeout(struct net_device *ndev)
+  *    sk_buff and adds it to the TX ring. It then kicks the TX DMA
+  *    engine to ensure transmission begins.
+  */
+-static int
++static netdev_tx_t
+ ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct ks8695_priv *ksp = netdev_priv(ndev);
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index 2fc5cd5..6c6108a 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -1020,7 +1020,7 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
+  * spin_lock_irqsave is required because tx and rx should be mutual exclusive.
+  * So while tx is in-progress, prevent IRQ interrupt from happenning.
+  */
+-static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       int retv = NETDEV_TX_OK;
+       struct ks_net *ks = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
+index 4367dd6..c32f151 100644
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -319,7 +319,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+-static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       void *desc;
+diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
+index eaa37c0..8295b08 100644
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -8556,7 +8556,7 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
+  * this device has been detected.
+  */
+ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
+-                                             pci_channel_state_t state)
++                                             enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct s2io_nic *sp = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+index 6223930..975033d 100644
+--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+       struct __vxge_hw_fifo *fifo;
+       struct vxge_hw_fifo_config *config;
+       u32 txdl_size, txdl_per_memblock;
+-      struct vxge_hw_mempool_cbs fifo_mp_callback;
++      static struct vxge_hw_mempool_cbs fifo_mp_callback = {
++              .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
++      };
++
+       struct __vxge_hw_virtualpath *vpath;
+       if ((vp == NULL) || (attr == NULL)) {
+@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+               goto exit;
+       }
+-      fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
+-
+       fifo->mempool =
+               __vxge_hw_mempool_create(vpath->hldev,
+                       fifo->config->memblock_size,
+diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
+index e0993eb..d8d7f50 100644
+--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
+@@ -4043,7 +4043,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
+  * this device has been detected.
+  */
+ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+       struct net_device *netdev = hldev->ndev;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 39dadfc..2f40f84 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -751,7 +751,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+  *
+  * Return: NETDEV_TX_OK on success.
+  */
+-static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct nfp_net *nn = netdev_priv(netdev);
+       const struct skb_frag_struct *frag;
+diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
+index adbc47f..d072612 100644
+--- a/drivers/net/ethernet/netx-eth.c
++++ b/drivers/net/ethernet/netx-eth.c
+@@ -107,7 +107,7 @@ static void netx_eth_set_multicast_list(struct net_device *ndev)
+       /* implement me */
+ }
+-static int
++static netdev_tx_t
+ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct netx_eth_priv *priv = netdev_priv(ndev);
+diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
+index 87b7b81..b352c4b 100644
+--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
++++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
+@@ -633,7 +633,7 @@ static int w90p910_send_frame(struct net_device *dev,
+       return 0;
+ }
+-static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct w90p910_ether *ether = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
+index 9b0d7f4..c29155f 100644
+--- a/drivers/net/ethernet/nvidia/forcedeth.c
++++ b/drivers/net/ethernet/nvidia/forcedeth.c
+@@ -357,8 +357,8 @@ struct ring_desc {
+ };
+ struct ring_desc_ex {
+-      __le32 bufhigh;
+-      __le32 buflow;
++      __le32 bufhigh __intentional_overflow(0);
++      __le32 buflow __intentional_overflow(0);
+       __le32 txvlan;
+       __le32 flaglen;
+ };
+diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
+index 8e13ec8..b654ea0 100644
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1053,7 +1053,7 @@ static int lpc_eth_close(struct net_device *ndev)
+       return 0;
+ }
+-static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct netdata_local *pldat = netdev_priv(ndev);
+       u32 len, txidx;
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 3cd87a4..3eb33e7 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -2130,7 +2130,7 @@ static int pch_gbe_stop(struct net_device *netdev)
+  *    - NETDEV_TX_OK:   Normal end
+  *    - NETDEV_TX_BUSY: Error end
+  */
+-static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+       struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+@@ -2439,7 +2439,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
+ };
+ static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index 7a0281a..ff425351 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1757,7 +1757,7 @@ err_out:
+ }
+ static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+index a496390..eaa03ae 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+@@ -2320,7 +2320,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+               max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+       } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+               ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+-              adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
++              pax_open_kernel();
++              const_cast(adapter->nic_ops->init_driver) = qlcnic_83xx_init_default_driver;
++              pax_close_kernel();
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+               max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+               max_tx_rings = QLCNIC_MAX_TX_RINGS;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+index 3490675..cf148ea 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+       case QLCNIC_NON_PRIV_FUNC:
+               ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+-              nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
++              pax_open_kernel();
++              const_cast(nic_ops->init_driver) = qlcnic_83xx_init_non_privileged_vnic;
++              pax_close_kernel();
+               break;
+       case QLCNIC_PRIV_FUNC:
+               ahw->op_mode = QLCNIC_PRIV_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+-              nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
++              pax_open_kernel();
++              const_cast(nic_ops->init_driver) = qlcnic_83xx_init_privileged_vnic;
++              pax_close_kernel();
+               break;
+       case QLCNIC_MGMT_FUNC:
+               ahw->op_mode = QLCNIC_MGMT_FUNC;
+               ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+-              nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
++              pax_open_kernel();
++              const_cast(nic_ops->init_driver) = qlcnic_83xx_init_mgmt_vnic;
++              pax_close_kernel();
+               break;
+       default:
+               dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+index 3ebef27..988b2b6 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+@@ -3974,7 +3974,7 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+ }
+ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+-                                               pci_channel_state_t state)
++                                               enum pci_channel_state state)
+ {
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+index 0844b7c..afa10a1 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+@@ -1285,7 +1285,7 @@ flash_temp:
+ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+ {
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+-      static const struct qlcnic_dump_operations *fw_dump_ops;
++      const struct qlcnic_dump_operations *fw_dump_ops;
+       struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+       u32 entry_offset, dump, no_entries, buf_offset = 0;
+       int i, k, ops_cnt, ops_index, dump_size = 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index e55638c..5fe3a62 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -798,22 +798,22 @@ struct rtl8169_private {
+       struct mdio_ops {
+               void (*write)(struct rtl8169_private *, int, int);
+               int (*read)(struct rtl8169_private *, int);
+-      } mdio_ops;
++      } __no_const mdio_ops;
+       struct pll_power_ops {
+               void (*down)(struct rtl8169_private *);
+               void (*up)(struct rtl8169_private *);
+-      } pll_power_ops;
++      } __no_const pll_power_ops;
+       struct jumbo_ops {
+               void (*enable)(struct rtl8169_private *);
+               void (*disable)(struct rtl8169_private *);
+-      } jumbo_ops;
++      } __no_const jumbo_ops;
+       struct csi_ops {
+               void (*write)(struct rtl8169_private *, int, int);
+               u32 (*read)(struct rtl8169_private *, int);
+-      } csi_ops;
++      } __no_const csi_ops;
+       int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
+       int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 054e795..5180c73 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2300,7 +2300,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
+ }
+ /* Packet transmit function */
+-static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct sh_eth_txdesc *txdesc;
+diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
+index f0b09b0..35869b1 100644
+--- a/drivers/net/ethernet/rocker/rocker_main.c
++++ b/drivers/net/ethernet/rocker/rocker_main.c
+@@ -2834,7 +2834,7 @@ out:
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block rocker_netdevice_nb __read_mostly = {
++static struct notifier_block rocker_netdevice_nb = {
+       .notifier_call = rocker_netdevice_event,
+ };
+@@ -2868,7 +2868,7 @@ static int rocker_netevent_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block rocker_netevent_nb __read_mostly = {
++static struct notifier_block rocker_netevent_nb = {
+       .notifier_call = rocker_netevent_event,
+ };
+diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
+index c2bd537..540a981 100644
+--- a/drivers/net/ethernet/seeq/sgiseeq.c
++++ b/drivers/net/ethernet/seeq/sgiseeq.c
+@@ -578,7 +578,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
+       return 0;
+ }
+-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct sgiseeq_private *sp = netdev_priv(dev);
+       struct hpc3_ethregs *hregs = sp->hregs;
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index c771e0a..bbb368d 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -832,7 +832,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+                      ptp->start.dma_addr);
+       /* Clear flag that signals MC ready */
+-      ACCESS_ONCE(*start) = 0;
++      ACCESS_ONCE_RW(*start) = 0;
+       rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+                               MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+       EFX_BUG_ON_PARANOID(rc);
+diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
+index 9d78830..74fc649 100644
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -82,8 +82,8 @@ struct efx_loopback_state {
+       int packet_count;
+       struct sk_buff **skbs;
+       bool offload_csum;
+-      atomic_t rx_good;
+-      atomic_t rx_bad;
++      atomic_unchecked_t rx_good;
++      atomic_unchecked_t rx_bad;
+       struct efx_loopback_payload payload;
+ };
+@@ -349,12 +349,12 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
+       netif_vdbg(efx, drv, efx->net_dev,
+                  "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+-      atomic_inc(&state->rx_good);
++      atomic_inc_unchecked(&state->rx_good);
+       return;
+  err:
+ #ifdef DEBUG
+-      if (atomic_read(&state->rx_bad) == 0) {
++      if (atomic_read_unchecked(&state->rx_bad) == 0) {
+               netif_err(efx, drv, efx->net_dev, "received packet:\n");
+               print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+                              buf_ptr, pkt_len, 0);
+@@ -363,7 +363,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
+                              &state->payload, sizeof(state->payload), 0);
+       }
+ #endif
+-      atomic_inc(&state->rx_bad);
++      atomic_inc_unchecked(&state->rx_bad);
+ }
+ /* Initialise an efx_selftest_state for a new iteration */
+@@ -397,8 +397,8 @@ static void efx_iterate_state(struct efx_nic *efx)
+       memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+       /* Fill out remaining state members */
+-      atomic_set(&state->rx_good, 0);
+-      atomic_set(&state->rx_bad, 0);
++      atomic_set_unchecked(&state->rx_good, 0);
++      atomic_set_unchecked(&state->rx_bad, 0);
+       smp_wmb();
+ }
+@@ -456,7 +456,7 @@ static int efx_poll_loopback(struct efx_nic *efx)
+ {
+       struct efx_loopback_state *state = efx->loopback_selftest;
+-      return atomic_read(&state->rx_good) == state->packet_count;
++      return atomic_read_unchecked(&state->rx_good) == state->packet_count;
+ }
+ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+@@ -482,8 +482,8 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+       netif_tx_unlock_bh(efx->net_dev);
+       /* Check TX completion and received packet counts */
+-      rx_good = atomic_read(&state->rx_good);
+-      rx_bad = atomic_read(&state->rx_bad);
++      rx_good = atomic_read_unchecked(&state->rx_good);
++      rx_bad = atomic_read_unchecked(&state->rx_bad);
+       if (tx_done != state->packet_count) {
+               /* Don't free the skbs; they will be picked up on TX
+                * overflow or channel teardown.
+diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
+index 7a254da..0693a2b4 100644
+--- a/drivers/net/ethernet/sgi/ioc3-eth.c
++++ b/drivers/net/ethernet/sgi/ioc3-eth.c
+@@ -103,7 +103,7 @@ static inline struct net_device *priv_netdev(struct ioc3_private *dev)
+ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+ static void ioc3_set_multicast_list(struct net_device *dev);
+-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ static void ioc3_timeout(struct net_device *dev);
+ static inline unsigned int ioc3_hash(const unsigned char *addr);
+ static inline void ioc3_stop(struct ioc3_private *ip);
+@@ -1397,7 +1397,7 @@ static struct pci_driver ioc3_driver = {
+       .remove         = ioc3_remove_one,
+ };
+-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       unsigned long data;
+       struct ioc3_private *ip = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
+index cb49c96..c1498cc 100644
+--- a/drivers/net/ethernet/smsc/smc911x.c
++++ b/drivers/net/ethernet/smsc/smc911x.c
+@@ -514,7 +514,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
+  * now, or set the card to generates an interrupt when ready
+  * for the packet.
+  */
+-static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct smc911x_local *lp = netdev_priv(dev);
+       unsigned int free;
+diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
+index 503a3b6..28d35c9 100644
+--- a/drivers/net/ethernet/smsc/smc91x.c
++++ b/drivers/net/ethernet/smsc/smc91x.c
+@@ -637,7 +637,7 @@ done:      if (!THROTTLE_TX_PKTS)
+  * now, or set the card to generates an interrupt when ready
+  * for the packet.
+  */
+-static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct smc_local *lp = netdev_priv(dev);
+       void __iomem *ioaddr = lp->base;
+diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
+index 4f8910b..50636e8 100644
+--- a/drivers/net/ethernet/smsc/smsc911x.c
++++ b/drivers/net/ethernet/smsc/smsc911x.c
+@@ -1760,7 +1760,7 @@ static int smsc911x_stop(struct net_device *dev)
+ }
+ /* Entry point for transmitting a packet */
+-static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct smsc911x_data *pdata = netdev_priv(dev);
+       unsigned int freespace;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index ce9aa79..ad1831f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+       writel(value, mmcaddr + MMC_CNTRL);
+-      pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+-               MMC_CNTRL, value);
++//    pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
++//             MMC_CNTRL, value);
+ }
+ /* To mask all all interrupts.*/
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4c8c60a..c29928c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1164,8 +1164,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+       if (!priv->rx_skbuff)
+               goto err_rx_skbuff;
+-      priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+-                                          sizeof(*priv->tx_skbuff_dma),
++      priv->tx_skbuff_dma = kmalloc_array(sizeof(*priv->tx_skbuff_dma),
++                                          DMA_TX_SIZE,
+                                           GFP_KERNEL);
+       if (!priv->tx_skbuff_dma)
+               goto err_tx_skbuff_dma;
+diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
+index aa4f9d2..d9ffff3 100644
+--- a/drivers/net/ethernet/sun/sunbmac.c
++++ b/drivers/net/ethernet/sun/sunbmac.c
+@@ -950,7 +950,7 @@ static void bigmac_tx_timeout(struct net_device *dev)
+ }
+ /* Put a packet on the wire. */
+-static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct bigmac *bp = netdev_priv(dev);
+       int len, entry;
+diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
+index 9b825780..71a2b34 100644
+--- a/drivers/net/ethernet/sun/sunqe.c
++++ b/drivers/net/ethernet/sun/sunqe.c
+@@ -568,7 +568,7 @@ out:
+ }
+ /* Get a packet queued to go onto the wire. */
+-static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct sunqe *qep = netdev_priv(dev);
+       struct sunqe_buffers *qbufs = qep->buffers;
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index a2f9b47..05a9147 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -131,7 +131,7 @@ static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ }
+ /* Wrappers to common functions */
+-static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find);
+ }
+diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
+index 904a5a1..6ef5cff 100644
+--- a/drivers/net/ethernet/sun/sunvnet_common.c
++++ b/drivers/net/ethernet/sun/sunvnet_common.c
+@@ -1126,7 +1126,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
+       return skb;
+ }
+-static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
++static netdev_tx_t vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
+                               struct vnet_port *(*vnet_tx_port)
+                               (struct sk_buff *, struct net_device *))
+ {
+@@ -1134,7 +1134,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
+       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       struct sk_buff *segs;
+       int maclen, datalen;
+-      int status;
++      netdev_tx_t status;
+       int gso_size, gso_type, gso_segs;
+       int hlen = skb_transport_header(skb) - skb_mac_header(skb);
+       int proto = IPPROTO_IP;
+@@ -1190,7 +1190,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
+       skb_push(skb, maclen);
+       skb_reset_mac_header(skb);
+-      status = 0;
++      status = NETDEV_TX_OK;
+       while (segs) {
+               struct sk_buff *curr = segs;
+diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+index 4490eba..cbd62ea 100644
+--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
++++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+@@ -2176,7 +2176,7 @@ static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+       lp->gso_size = tx->prev_gso_size;
+ }
+-static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct net_local *lp = netdev_priv(ndev);
+       struct dwceqos_tx trans;
+diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
+index d300d53..31adc932 100644
+--- a/drivers/net/ethernet/ti/cpmac.c
++++ b/drivers/net/ethernet/ti/cpmac.c
+@@ -544,7 +544,7 @@ fatal_error:
+ }
+-static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       int queue, len;
+       struct cpmac_desc *desc;
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 727a79f..38ef419 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -943,7 +943,7 @@ static void emac_tx_handler(void *token, int len, int status)
+  *
+  * Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
+  */
+-static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct device *emac_dev = &ndev->dev;
+       int ret_code;
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index 3251666..6eb86ae 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -1237,7 +1237,7 @@ out:
+ }
+ /* Submit the packet */
+-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       int subqueue = skb_get_queue_mapping(skb);
+diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
+index 9d14731..7d6ad91 100644
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -2600,7 +2600,7 @@ static struct platform_driver rhine_driver_platform = {
+       }
+ };
+-static struct dmi_system_id rhine_dmi_table[] __initdata = {
++static const struct dmi_system_id rhine_dmi_table[] __initconst = {
+       {
+               .ident = "EPIA-M",
+               .matches = {
+diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
+index 37ab46c..2875480 100644
+--- a/drivers/net/ethernet/wiznet/w5100.c
++++ b/drivers/net/ethernet/wiznet/w5100.c
+@@ -836,7 +836,7 @@ static void w5100_tx_work(struct work_struct *work)
+       w5100_tx_skb(priv->ndev, skb);
+ }
+-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct w5100_priv *priv = netdev_priv(ndev);
+diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
+index 0b37ce9..4ec594b 100644
+--- a/drivers/net/ethernet/wiznet/w5300.c
++++ b/drivers/net/ethernet/wiznet/w5300.c
+@@ -366,7 +366,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
+       netif_wake_queue(ndev);
+ }
+-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct w5300_priv *priv = netdev_priv(ndev);
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index a9bd665..2fc2924 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -673,7 +673,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
+       return 0;
+ }
+-static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct temac_local *lp = netdev_priv(ndev);
+       struct cdmac_bd *cur_p;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 36ee7ab..7a76e3f 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -652,7 +652,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+  * start the transmission. Additionally if checksum offloading is supported,
+  * it populates AXI Stream Control fields with appropriate values.
+  */
+-static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       u32 ii;
+       u32 num_frag;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index 93dc10b..6598671 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -995,7 +995,7 @@ static int xemaclite_close(struct net_device *dev)
+  *
+  * Return:    0, always.
+  */
+-static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
++static netdev_tx_t xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
+ {
+       struct net_local *lp = netdev_priv(dev);
+       struct sk_buff *new_skb;
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 3c20e87..5696f6f 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1450,7 +1450,7 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+-static struct rtnl_link_ops geneve_link_ops __read_mostly = {
++static struct rtnl_link_ops geneve_link_ops = {
+       .kind           = "geneve",
+       .maxtype        = IFLA_GENEVE_MAX,
+       .policy         = geneve_policy,
+@@ -1516,7 +1516,7 @@ static int geneve_netdevice_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block geneve_notifier_block __read_mostly = {
++static struct notifier_block geneve_notifier_block = {
+       .notifier_call = geneve_netdevice_event,
+ };
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 97e0cbc..3aec9e5 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -58,7 +58,7 @@ struct pdp_ctx {
+       struct in_addr          ms_addr_ip4;
+       struct in_addr          sgsn_addr_ip4;
+-      atomic_t                tx_seq;
++      atomic_unchecked_t      tx_seq;
+       struct rcu_head         rcu_head;
+ };
+@@ -407,7 +407,7 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+       gtp0->flags     = 0x1e; /* v0, GTP-non-prime. */
+       gtp0->type      = GTP_TPDU;
+       gtp0->length    = htons(payload_len);
+-      gtp0->seq       = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
++      gtp0->seq       = htons((atomic_inc_return_unchecked(&pctx->tx_seq) - 1) % 0xffff);
+       gtp0->flow      = htons(pctx->u.v0.flow);
+       gtp0->number    = 0xff;
+       gtp0->spare[0]  = gtp0->spare[1] = gtp0->spare[2] = 0xff;
+@@ -751,7 +751,7 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+-static struct rtnl_link_ops gtp_link_ops __read_mostly = {
++static struct rtnl_link_ops gtp_link_ops = {
+       .kind           = "gtp",
+       .maxtype        = IFLA_GTP_MAX,
+       .policy         = gtp_policy,
+@@ -959,7 +959,7 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+               return -ENOMEM;
+       ipv4_pdp_fill(pctx, info);
+-      atomic_set(&pctx->tx_seq, 0);
++      atomic_set_unchecked(&pctx->tx_seq, 0);
+       switch (pctx->gtp_version) {
+       case GTP_V0:
+diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
+index 78dbc44..b7831d0 100644
+--- a/drivers/net/hamradio/baycom_epp.c
++++ b/drivers/net/hamradio/baycom_epp.c
+@@ -768,7 +768,7 @@ static void epp_bh(struct work_struct *work)
+  * ===================== network driver interface =========================
+  */
+-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct baycom_state *bc = netdev_priv(dev);
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index 591af71..a5bbc7a 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -162,7 +162,7 @@ struct rndis_device {
+       enum rndis_device_state state;
+       bool link_state;
+-      atomic_t new_req_id;
++      atomic_unchecked_t new_req_id;
+       spinlock_t request_lock;
+       struct list_head req_list;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3ba29fc..793bdcf 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -349,7 +349,7 @@ not_ip:
+       return ret_val;
+ }
+-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
++static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+ {
+       struct net_device_context *net_device_ctx = netdev_priv(net);
+       struct hv_netvsc_packet *packet = NULL;
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index 8e830f7..37da185 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -101,7 +101,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
+        * template
+        */
+       set = &rndis_msg->msg.set_req;
+-      set->req_id = atomic_inc_return(&dev->new_req_id);
++      set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+       /* Add to the request list */
+       spin_lock_irqsave(&dev->request_lock, flags);
+@@ -861,7 +861,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
+       /* Setup the rndis set */
+       halt = &request->request_msg.msg.halt_req;
+-      halt->req_id = atomic_inc_return(&dev->new_req_id);
++      halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+       /* Ignore return since this msg is optional. */
+       rndis_filter_send_request(dev, request);
+@@ -1075,8 +1075,7 @@ int rndis_filter_device_add(struct hv_device *dev,
+       if (net_device->num_chn == 1)
+               goto out;
+-      net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
+-                                       NETVSC_PACKET_SIZE);
++      net_device->sub_cb_buf = vzalloc(net_device->num_sc_offered * NETVSC_PACKET_SIZE);
+       if (!net_device->sub_cb_buf) {
+               net_device->num_chn = 1;
+               dev_info(&dev->device, "No memory for subchannels.\n");
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index 66c0eea..27486de 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -290,7 +290,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
+       return 0;
+ }
+-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
++static struct rtnl_link_ops ifb_link_ops = {
+       .kind           = "ifb",
+       .priv_size      = sizeof(struct ifb_dev_private),
+       .setup          = ifb_setup,
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index b5f9511..c883583 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -484,7 +484,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port,
+               schedule_work(&port->wq);
+       } else {
+               spin_unlock(&port->backlog.lock);
+-              atomic_long_inc(&skb->dev->rx_dropped);
++              atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+               kfree_skb(skb);
+       }
+ }
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 18b4e8c..65f5386 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -734,15 +734,15 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
++static struct notifier_block ipvlan_addr4_notifier_block = {
+       .notifier_call = ipvlan_addr4_event,
+ };
+-static struct notifier_block ipvlan_notifier_block __read_mostly = {
++static struct notifier_block ipvlan_notifier_block = {
+       .notifier_call = ipvlan_device_event,
+ };
+-static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
++static struct notifier_block ipvlan_addr6_notifier_block = {
+       .notifier_call = ipvlan_addr6_event,
+ };
+diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
+index a0849f4..147a4a6 100644
+--- a/drivers/net/irda/vlsi_ir.c
++++ b/drivers/net/irda/vlsi_ir.c
+@@ -142,7 +142,7 @@ static void vlsi_ring_debug(struct vlsi_ring *r)
+       printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+               __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+       printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
+-              atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
++              atomic_read_unchecked(&r->head) & r->mask, atomic_read_unchecked(&r->tail) & r->mask);
+       for (i = 0; i < r->size; i++) {
+               rd = &r->rd[i];
+               printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
+@@ -301,8 +301,8 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
+       seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+               r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+-      h = atomic_read(&r->head) & r->mask;
+-      t = atomic_read(&r->tail) & r->mask;
++      h = atomic_read_unchecked(&r->head) & r->mask;
++      t = atomic_read_unchecked(&r->tail) & r->mask;
+       seq_printf(seq, "head = %d / tail = %d ", h, t);
+       if (h == t)
+               seq_printf(seq, "(empty)\n");
+@@ -410,8 +410,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
+       r->rd = (struct ring_descr *)(r+1);
+       r->mask = size - 1;
+       r->size = size;
+-      atomic_set(&r->head, 0);
+-      atomic_set(&r->tail, 0);
++      atomic_set_unchecked(&r->head, 0);
++      atomic_set_unchecked(&r->tail, 0);
+       for (i = 0; i < size; i++) {
+               rd = r->rd + i;
+@@ -1268,10 +1268,10 @@ static int vlsi_init_chip(struct pci_dev *pdev)
+               iobase+VLSI_PIO_RINGSIZE);      
+       ptr = inw(iobase+VLSI_PIO_RINGPTR);
+-      atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
+-      atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
+-      atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
+-      atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
++      atomic_set_unchecked(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
++      atomic_set_unchecked(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
++      atomic_set_unchecked(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
++      atomic_set_unchecked(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
+       vlsi_set_baud(idev, iobase);    /* idev->new_baud used as provided by caller */
+diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
+index f9db2ce..6cd460c 100644
+--- a/drivers/net/irda/vlsi_ir.h
++++ b/drivers/net/irda/vlsi_ir.h
+@@ -671,7 +671,7 @@ struct vlsi_ring {
+       unsigned                len;
+       unsigned                size;
+       unsigned                mask;
+-      atomic_t                head, tail;
++      atomic_unchecked_t      head, tail;
+       struct ring_descr       *rd;
+ };
+@@ -681,13 +681,13 @@ static inline struct ring_descr *ring_last(struct vlsi_ring *r)
+ {
+       int t;
+-      t = atomic_read(&r->tail) & r->mask;
+-      return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
++      t = atomic_read_unchecked(&r->tail) & r->mask;
++      return (((t+1) & r->mask) == (atomic_read_unchecked(&r->head) & r->mask)) ? NULL : &r->rd[t];
+ }
+ static inline struct ring_descr *ring_put(struct vlsi_ring *r)
+ {
+-      atomic_inc(&r->tail);
++      atomic_inc_unchecked(&r->tail);
+       return ring_last(r);
+ }
+@@ -695,13 +695,13 @@ static inline struct ring_descr *ring_first(struct vlsi_ring *r)
+ {
+       int h;
+-      h = atomic_read(&r->head) & r->mask;
+-      return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
++      h = atomic_read_unchecked(&r->head) & r->mask;
++      return (h == (atomic_read_unchecked(&r->tail) & r->mask)) ? NULL : &r->rd[h];
+ }
+ static inline struct ring_descr *ring_get(struct vlsi_ring *r)
+ {
+-      atomic_inc(&r->head);
++      atomic_inc_unchecked(&r->head);
+       return ring_first(r);
+ }
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 6255973..7ae59f5 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -216,6 +216,6 @@ out:
+ }
+ /* Registered in net/core/dev.c */
+-struct pernet_operations __net_initdata loopback_net_ops = {
++struct pernet_operations __net_initconst loopback_net_ops = {
+        .init = loopback_net_init,
+ };
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 351e701..8b7039d 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3378,7 +3378,7 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+-static struct rtnl_link_ops macsec_link_ops __read_mostly = {
++static struct rtnl_link_ops macsec_link_ops = {
+       .kind           = "macsec",
+       .priv_size      = sizeof(struct macsec_dev),
+       .maxtype        = IFLA_MACSEC_MAX,
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 3234fcd..954fb39 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -343,7 +343,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ free_nskb:
+       kfree_skb(nskb);
+ err:
+-      atomic_long_inc(&skb->dev->rx_dropped);
++      atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+ }
+ static void macvlan_flush_sources(struct macvlan_port *port,
+@@ -1508,13 +1508,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
+ int macvlan_link_register(struct rtnl_link_ops *ops)
+ {
+       /* common fields */
+-      ops->priv_size          = sizeof(struct macvlan_dev);
+-      ops->validate           = macvlan_validate;
+-      ops->maxtype            = IFLA_MACVLAN_MAX;
+-      ops->policy             = macvlan_policy;
+-      ops->changelink         = macvlan_changelink;
+-      ops->get_size           = macvlan_get_size;
+-      ops->fill_info          = macvlan_fill_info;
++      pax_open_kernel();
++      const_cast(ops->priv_size)      = sizeof(struct macvlan_dev);
++      const_cast(ops->validate)       = macvlan_validate;
++      const_cast(ops->maxtype)        = IFLA_MACVLAN_MAX;
++      const_cast(ops->policy)         = macvlan_policy;
++      const_cast(ops->changelink)     = macvlan_changelink;
++      const_cast(ops->get_size)       = macvlan_get_size;
++      const_cast(ops->fill_info)      = macvlan_fill_info;
++      pax_close_kernel();
+       return rtnl_link_register(ops);
+ };
+@@ -1602,7 +1604,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block macvlan_notifier_block __read_mostly = {
++static struct notifier_block macvlan_notifier_block = {
+       .notifier_call  = macvlan_device_event,
+ };
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 070e329..b829217 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -508,7 +508,7 @@ static void macvtap_setup(struct net_device *dev)
+       dev->tx_queue_len = TUN_READQ_SIZE;
+ }
+-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
++static struct rtnl_link_ops macvtap_link_ops = {
+       .kind           = "macvtap",
+       .setup          = macvtap_setup,
+       .newlink        = macvtap_newlink,
+@@ -1049,7 +1049,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+               ret = 0;
+               u = q->flags;
+-              if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
++              if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+                   put_user(u, &ifr->ifr_flags))
+                       ret = -EFAULT;
+               macvtap_put_vlan(vlan);
+@@ -1132,8 +1132,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+               }
+               ret = 0;
+               u = vlan->dev->type;
+-              if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+-                  copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
++              if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
++                  copy_to_user(ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
+                   put_user(u, &ifr->ifr_hwaddr.sa_family))
+                       ret = -EFAULT;
+               macvtap_put_vlan(vlan);
+@@ -1311,7 +1311,7 @@ static int macvtap_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block macvtap_notifier_block __read_mostly = {
++static struct notifier_block macvtap_notifier_block = {
+       .notifier_call  = macvtap_device_event,
+ };
+diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
+index 7b7c70e..a92dc83 100644
+--- a/drivers/net/nlmon.c
++++ b/drivers/net/nlmon.c
+@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
+       return 0;
+ }
+-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
++static struct rtnl_link_ops nlmon_link_ops = {
+       .kind                   = "nlmon",
+       .priv_size              = sizeof(struct nlmon),
+       .setup                  = nlmon_setup,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index e977ba9..e3df8dcd8 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -411,7 +411,7 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
+  *   zero on success.
+  *
+  */
+-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
++static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
+                          struct phy_c45_device_ids *c45_ids) {
+       int phy_reg;
+       int i, reg_addr;
+@@ -482,7 +482,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
+  *   its return value is in turn returned.
+  *
+  */
+-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
++static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
+                     bool is_c45, struct phy_c45_device_ids *c45_ids)
+ {
+       int phy_reg;
+@@ -520,7 +520,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
+ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
+ {
+       struct phy_c45_device_ids c45_ids = {0};
+-      u32 phy_id = 0;
++      int phy_id = 0;
+       int r;
+       r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
+diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
+index 9c4b41a..03da80b 100644
+--- a/drivers/net/plip/plip.c
++++ b/drivers/net/plip/plip.c
+@@ -950,7 +950,7 @@ plip_interrupt(void *dev_id)
+       spin_unlock_irqrestore(&nl->lock, flags);
+ }
+-static int
++static netdev_tx_t
+ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct net_local *nl = netdev_priv(dev);
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index f226db4..6d75edc 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1135,7 +1135,7 @@ static struct net *ppp_nl_get_link_net(const struct net_device *dev)
+       return ppp->ppp_net;
+ }
+-static struct rtnl_link_ops ppp_link_ops __read_mostly = {
++static struct rtnl_link_ops ppp_link_ops = {
+       .kind           = "ppp",
+       .maxtype        = IFLA_PPP_MAX,
+       .policy         = ppp_nl_policy,
+@@ -1253,7 +1253,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+       void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+       struct ppp_stats stats;
+       struct ppp_comp_stats cstats;
+-      char *vers;
+       switch (cmd) {
+       case SIOCGPPPSTATS:
+@@ -1275,8 +1274,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               break;
+       case SIOCGPPPVER:
+-              vers = PPP_VERSION;
+-              if (copy_to_user(addr, vers, strlen(vers) + 1))
++              if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
+                       break;
+               err = 0;
+               break;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index ae0905e..f22c8e9d 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -368,7 +368,7 @@ allow_packet:
+               }
+               skb->ip_summed = CHECKSUM_NONE;
+-              skb_set_network_header(skb, skb->head-skb->data);
++              skb->network_header = 0;
+               ppp_input(&po->chan, skb);
+               return NET_RX_SUCCESS;
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index a31f461..949a77a 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -170,7 +170,7 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
+       return 0;
+ }
+-static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       int i;
+       struct rionet_private *rnet = netdev_priv(ndev);
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 27ed252..80cffde 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -491,7 +491,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+       register struct tcphdr *thp;
+       register struct iphdr *ip;
+       register struct cstate *cs;
+-      int len, hdrlen;
++      long len, hdrlen;
+       unsigned char *cp = icp;
+       /* We've got a compressed packet; read the change byte */
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index a380649..fd8fe79c 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2135,7 +2135,7 @@ static unsigned int team_get_num_rx_queues(void)
+       return TEAM_DEFAULT_NUM_RX_QUEUES;
+ }
+-static struct rtnl_link_ops team_link_ops __read_mostly = {
++static struct rtnl_link_ops team_link_ops = {
+       .kind                   = DRV_NAME,
+       .priv_size              = sizeof(struct team),
+       .setup                  = team_setup,
+@@ -2930,7 +2930,7 @@ static int team_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block team_notifier_block __read_mostly = {
++static struct notifier_block team_notifier_block = {
+       .notifier_call = team_device_event,
+ };
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 6f9df37..3c37ed5 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -972,7 +972,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
+ {
+       struct tun_struct *tun = netdev_priv(dev);
+-      if (new_hr < NET_SKB_PAD)
++      if (new_hr < 0 || new_hr < NET_SKB_PAD)
+               new_hr = NET_SKB_PAD;
+       tun->align = new_hr;
+@@ -1556,7 +1556,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
+       return -EINVAL;
+ }
+-static struct rtnl_link_ops tun_link_ops __read_mostly = {
++static struct rtnl_link_ops tun_link_ops = {
+       .kind           = DRV_NAME,
+       .priv_size      = sizeof(struct tun_struct),
+       .setup          = tun_setup,
+@@ -1985,7 +1985,7 @@ unlock:
+ }
+ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+-                          unsigned long arg, int ifreq_len)
++                          unsigned long arg, size_t ifreq_len)
+ {
+       struct tun_file *tfile = file->private_data;
+       struct tun_struct *tun;
+@@ -1999,6 +1999,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+       int le;
+       int ret;
++      if (ifreq_len > sizeof ifr)
++              return -EFAULT;
++
+       if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
+               if (copy_from_user(&ifr, argp, ifreq_len))
+                       return -EFAULT;
+@@ -2514,7 +2517,7 @@ static int tun_device_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block tun_notifier_block __read_mostly = {
++static struct notifier_block tun_notifier_block = {
+       .notifier_call  = tun_device_event,
+ };
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 4b44586..ae19659 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -70,7 +70,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+-
++#include <asm/local.h>
+ #define MOD_AUTHOR                    "Option Wireless"
+ #define MOD_DESCRIPTION                       "USB High Speed Option driver"
+@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
+       struct urb *urb;
+       urb = serial->rx_urb[0];
+-      if (serial->port.count > 0) {
++      if (atomic_read(&serial->port.count) > 0) {
+               count = put_rxbuf_data(urb, serial);
+               if (count == -1)
+                       return;
+@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
+       DUMP1(urb->transfer_buffer, urb->actual_length);
+       /* Anyone listening? */
+-      if (serial->port.count == 0)
++      if (atomic_read(&serial->port.count) == 0)
+               return;
+       if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+@@ -1237,8 +1237,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
+  * This needs to be a tasklet otherwise we will
+  * end up recursively calling this function.
+  */
+-static void hso_unthrottle_tasklet(struct hso_serial *serial)
++static void hso_unthrottle_tasklet(unsigned long _serial)
+ {
++      struct hso_serial *serial = (struct hso_serial *)_serial;
+       unsigned long flags;
+       spin_lock_irqsave(&serial->serial_lock, flags);
+@@ -1282,18 +1283,17 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
+       tty_port_tty_set(&serial->port, tty);
+       /* check for port already opened, if not set the termios */
+-      serial->port.count++;
+-      if (serial->port.count == 1) {
++      if (atomic_inc_return(&serial->port.count) == 1) {
+               serial->rx_state = RX_IDLE;
+               /* Force default termio settings */
+               _hso_serial_set_termios(tty, NULL);
+               tasklet_init(&serial->unthrottle_tasklet,
+-                           (void (*)(unsigned long))hso_unthrottle_tasklet,
++                           hso_unthrottle_tasklet,
+                            (unsigned long)serial);
+               result = hso_start_serial_device(serial->parent, GFP_KERNEL);
+               if (result) {
+                       hso_stop_serial_device(serial->parent);
+-                      serial->port.count--;
++                      atomic_dec(&serial->port.count);
+               } else {
+                       kref_get(&serial->parent->ref);
+               }
+@@ -1331,10 +1331,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
+       /* reset the rts and dtr */
+       /* do the actual close */
+-      serial->port.count--;
++      atomic_dec(&serial->port.count);
+-      if (serial->port.count <= 0) {
+-              serial->port.count = 0;
++      if (atomic_read(&serial->port.count) <= 0) {
++              atomic_set(&serial->port.count, 0);
+               tty_port_tty_set(&serial->port, NULL);
+               if (!usb_gone)
+                       hso_stop_serial_device(serial->parent);
+@@ -1417,7 +1417,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+       /* the actual setup */
+       spin_lock_irqsave(&serial->serial_lock, flags);
+-      if (serial->port.count)
++      if (atomic_read(&serial->port.count))
+               _hso_serial_set_termios(tty, old);
+       else
+               tty->termios = *old;
+@@ -1891,7 +1891,7 @@ static void intr_callback(struct urb *urb)
+                               D1("Pending read interrupt on port %d\n", i);
+                               spin_lock(&serial->serial_lock);
+                               if (serial->rx_state == RX_IDLE &&
+-                                      serial->port.count > 0) {
++                                      atomic_read(&serial->port.count) > 0) {
+                                       /* Setup and send a ctrl req read on
+                                        * port i */
+                                       if (!serial->rx_urb_filled[0]) {
+@@ -3058,7 +3058,7 @@ static int hso_resume(struct usb_interface *iface)
+       /* Start all serial ports */
+       for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
+               if (serial_table[i] && (serial_table[i]->interface == iface)) {
+-                      if (dev2ser(serial_table[i])->port.count) {
++                      if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
+                               result =
+                                   hso_start_serial_device(serial_table[i], GFP_NOIO);
+                               hso_kick_transmit(dev2ser(serial_table[i]));
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 76465b1..2d72355 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -400,7 +400,7 @@ static int ipheth_close(struct net_device *net)
+       return 0;
+ }
+-static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
++static netdev_tx_t ipheth_tx(struct sk_buff *skb, struct net_device *net)
+ {
+       struct ipheth_device *dev = netdev_priv(net);
+       struct usb_device *udev = dev->udev;
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index c254248..e4a52dc 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -632,7 +632,7 @@ struct r8152 {
+               bool (*in_nway)(struct r8152 *);
+               void (*hw_phy_cfg)(struct r8152 *);
+               void (*autosuspend_en)(struct r8152 *tp, bool enable);
+-      } rtl_ops;
++      } __no_const rtl_ops;
+       int intr_interval;
+       u32 saved_wolopts;
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index a2515887..6d13233 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
+ /* atomic counter partially included in MAC address to make sure 2 devices
+  * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+  */
+-static        atomic_t iface_counter = ATOMIC_INIT(0);
++static        atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
+ /*
+  * SYNC Timer Delay definition used to set the expiry time
+@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+       dev->net->netdev_ops = &sierra_net_device_ops;
+       /* change MAC addr to include, ifacenum, and to be unique */
+-      dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
++      dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
+       dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+       /* we will have to manufacture ethernet headers, prepare template */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 1b5f531..3c16c42 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
+ DECLARE_EWMA(pkt_len, 1, 64)
+ /* Minimum alignment for mergeable packet buffers. */
+-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
++#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
+ #define VIRTNET_DRIVER_VERSION "1.0.0"
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 1ce7420..8bef471 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1287,7 +1287,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+       [IFLA_VRF_TABLE] = { .type = NLA_U32 },
+ };
+-static struct rtnl_link_ops vrf_link_ops __read_mostly = {
++static struct rtnl_link_ops vrf_link_ops = {
+       .kind           = DRV_NAME,
+       .priv_size      = sizeof(struct net_vrf),
+@@ -1324,7 +1324,7 @@ out:
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block vrf_notifier_block __read_mostly = {
++static struct notifier_block vrf_notifier_block = {
+       .notifier_call = vrf_device_event,
+ };
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 6e65832..def968c 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3169,7 +3169,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
+       return vxlan->net;
+ }
+-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
++static struct rtnl_link_ops vxlan_link_ops = {
+       .kind           = "vxlan",
+       .maxtype        = IFLA_VXLAN_MAX,
+       .policy         = vxlan_policy,
+@@ -3253,7 +3253,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block vxlan_notifier_block __read_mostly = {
++static struct notifier_block vxlan_notifier_block = {
+       .notifier_call = vxlan_netdevice_event,
+ };
+diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
+index 5920c99..ff2e4a5 100644
+--- a/drivers/net/wan/lmc/lmc_media.c
++++ b/drivers/net/wan/lmc/lmc_media.c
+@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
+ static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
+ lmc_media_t lmc_ds3_media = {
+-  lmc_ds3_init,                       /* special media init stuff */
+-  lmc_ds3_default,            /* reset to default state */
+-  lmc_ds3_set_status,         /* reset status to state provided */
+-  lmc_dummy_set_1,            /* set clock source */
+-  lmc_dummy_set2_1,           /* set line speed */
+-  lmc_ds3_set_100ft,          /* set cable length */
+-  lmc_ds3_set_scram,          /* set scrambler */
+-  lmc_ds3_get_link_status,    /* get link status */
+-  lmc_dummy_set_1,            /* set link status */
+-  lmc_ds3_set_crc_length,     /* set CRC length */
+-  lmc_dummy_set_1,            /* set T1 or E1 circuit type */
+-  lmc_ds3_watchdog
++  .init = lmc_ds3_init,                               /* special media init stuff */
++  .defaults = lmc_ds3_default,                        /* reset to default state */
++  .set_status = lmc_ds3_set_status,           /* reset status to state provided */
++  .set_clock_source = lmc_dummy_set_1,                /* set clock source */
++  .set_speed = lmc_dummy_set2_1,              /* set line speed */
++  .set_cable_length = lmc_ds3_set_100ft,      /* set cable length */
++  .set_scrambler = lmc_ds3_set_scram,         /* set scrambler */
++  .get_link_status = lmc_ds3_get_link_status, /* get link status */
++  .set_link_status = lmc_dummy_set_1,         /* set link status */
++  .set_crc_length = lmc_ds3_set_crc_length,   /* set CRC length */
++  .set_circuit_type = lmc_dummy_set_1,                /* set T1 or E1 circuit type */
++  .watchdog = lmc_ds3_watchdog
+ };
+ lmc_media_t lmc_hssi_media = {
+-  lmc_hssi_init,              /* special media init stuff */
+-  lmc_hssi_default,           /* reset to default state */
+-  lmc_hssi_set_status,                /* reset status to state provided */
+-  lmc_hssi_set_clock,         /* set clock source */
+-  lmc_dummy_set2_1,           /* set line speed */
+-  lmc_dummy_set_1,            /* set cable length */
+-  lmc_dummy_set_1,            /* set scrambler */
+-  lmc_hssi_get_link_status,   /* get link status */
+-  lmc_hssi_set_link_status,   /* set link status */
+-  lmc_hssi_set_crc_length,    /* set CRC length */
+-  lmc_dummy_set_1,            /* set T1 or E1 circuit type */
+-  lmc_hssi_watchdog
++  .init = lmc_hssi_init,                      /* special media init stuff */
++  .defaults = lmc_hssi_default,                       /* reset to default state */
++  .set_status = lmc_hssi_set_status,          /* reset status to state provided */
++  .set_clock_source = lmc_hssi_set_clock,     /* set clock source */
++  .set_speed = lmc_dummy_set2_1,              /* set line speed */
++  .set_cable_length = lmc_dummy_set_1,                /* set cable length */
++  .set_scrambler = lmc_dummy_set_1,           /* set scrambler */
++  .get_link_status = lmc_hssi_get_link_status,        /* get link status */
++  .set_link_status = lmc_hssi_set_link_status,        /* set link status */
++  .set_crc_length = lmc_hssi_set_crc_length,  /* set CRC length */
++  .set_circuit_type = lmc_dummy_set_1,                /* set T1 or E1 circuit type */
++  .watchdog = lmc_hssi_watchdog
+ };
+-lmc_media_t lmc_ssi_media = { lmc_ssi_init,   /* special media init stuff */
+-  lmc_ssi_default,            /* reset to default state */
+-  lmc_ssi_set_status,         /* reset status to state provided */
+-  lmc_ssi_set_clock,          /* set clock source */
+-  lmc_ssi_set_speed,          /* set line speed */
+-  lmc_dummy_set_1,            /* set cable length */
+-  lmc_dummy_set_1,            /* set scrambler */
+-  lmc_ssi_get_link_status,    /* get link status */
+-  lmc_ssi_set_link_status,    /* set link status */
+-  lmc_ssi_set_crc_length,     /* set CRC length */
+-  lmc_dummy_set_1,            /* set T1 or E1 circuit type */
+-  lmc_ssi_watchdog
++lmc_media_t lmc_ssi_media = {
++  .init = lmc_ssi_init,                               /* special media init stuff */
++  .defaults = lmc_ssi_default,                        /* reset to default state */
++  .set_status = lmc_ssi_set_status,           /* reset status to state provided */
++  .set_clock_source = lmc_ssi_set_clock,      /* set clock source */
++  .set_speed = lmc_ssi_set_speed,             /* set line speed */
++  .set_cable_length = lmc_dummy_set_1,                /* set cable length */
++  .set_scrambler = lmc_dummy_set_1,           /* set scrambler */
++  .get_link_status = lmc_ssi_get_link_status, /* get link status */
++  .set_link_status = lmc_ssi_set_link_status, /* set link status */
++  .set_crc_length = lmc_ssi_set_crc_length,   /* set CRC length */
++  .set_circuit_type = lmc_dummy_set_1,                /* set T1 or E1 circuit type */
++  .watchdog = lmc_ssi_watchdog
+ };
+ lmc_media_t lmc_t1_media = {
+-  lmc_t1_init,                        /* special media init stuff */
+-  lmc_t1_default,             /* reset to default state */
+-  lmc_t1_set_status,          /* reset status to state provided */
+-  lmc_t1_set_clock,           /* set clock source */
+-  lmc_dummy_set2_1,           /* set line speed */
+-  lmc_dummy_set_1,            /* set cable length */
+-  lmc_dummy_set_1,            /* set scrambler */
+-  lmc_t1_get_link_status,     /* get link status */
+-  lmc_dummy_set_1,            /* set link status */
+-  lmc_t1_set_crc_length,      /* set CRC length */
+-  lmc_t1_set_circuit_type,    /* set T1 or E1 circuit type */
+-  lmc_t1_watchdog
++  .init = lmc_t1_init,                                /* special media init stuff */
++  .defaults = lmc_t1_default,                 /* reset to default state */
++  .set_status = lmc_t1_set_status,            /* reset status to state provided */
++  .set_clock_source = lmc_t1_set_clock,               /* set clock source */
++  .set_speed = lmc_dummy_set2_1,              /* set line speed */
++  .set_cable_length = lmc_dummy_set_1,                /* set cable length */
++  .set_scrambler = lmc_dummy_set_1,           /* set scrambler */
++  .get_link_status = lmc_t1_get_link_status,  /* get link status */
++  .set_link_status = lmc_dummy_set_1,         /* set link status */
++  .set_crc_length = lmc_t1_set_crc_length,    /* set CRC length */
++  .set_circuit_type = lmc_t1_set_circuit_type,        /* set T1 or E1 circuit type */
++  .watchdog = lmc_t1_watchdog
+ };
+ static void
+diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
+index 2f0bd69..e46ed7b 100644
+--- a/drivers/net/wan/z85230.c
++++ b/drivers/net/wan/z85230.c
+@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
+ struct z8530_irqhandler z8530_sync =
+ {
+-      z8530_rx,
+-      z8530_tx,
+-      z8530_status
++      .rx = z8530_rx,
++      .tx = z8530_tx,
++      .status = z8530_status
+ };
+ EXPORT_SYMBOL(z8530_sync);
+@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
+ }
+ static struct z8530_irqhandler z8530_dma_sync = {
+-      z8530_dma_rx,
+-      z8530_dma_tx,
+-      z8530_dma_status
++      .rx = z8530_dma_rx,
++      .tx = z8530_dma_tx,
++      .status = z8530_dma_status
+ };
+ static struct z8530_irqhandler z8530_txdma_sync = {
+-      z8530_rx,
+-      z8530_dma_tx,
+-      z8530_dma_status
++      .rx = z8530_rx,
++      .tx = z8530_dma_tx,
++      .status = z8530_dma_status
+ };
+ /**
+@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
+ struct z8530_irqhandler z8530_nop=
+ {
+-      z8530_rx_clear,
+-      z8530_tx_clear,
+-      z8530_status_clear
++      .rx = z8530_rx_clear,
++      .tx = z8530_tx_clear,
++      .status = z8530_status_clear
+ };
+diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
+index 0b60295..b8bfa5b 100644
+--- a/drivers/net/wimax/i2400m/rx.c
++++ b/drivers/net/wimax/i2400m/rx.c
+@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
+               if (i2400m->rx_roq == NULL)
+                       goto error_roq_alloc;
+-              rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
++              rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
+                            GFP_KERNEL);
+               if (rd == NULL) {
+                       result = -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
+index da9998e..5ef101a 100644
+--- a/drivers/net/wireless/ath/ath10k/ce.c
++++ b/drivers/net/wireless/ath/ath10k/ce.c
+@@ -887,12 +887,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+       return 0;
+ }
+-static struct ath10k_ce_ring *
++static struct ath10k_ce_ring * __intentional_overflow(-1)
+ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+                        const struct ce_attr *attr)
+ {
+       struct ath10k_ce_ring *src_ring;
+-      u32 nentries = attr->src_nentries;
++      unsigned long nentries = attr->src_nentries;
+       dma_addr_t base_addr;
+       nentries = roundup_pow_of_two(nentries);
+@@ -938,7 +938,7 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+                         const struct ce_attr *attr)
+ {
+       struct ath10k_ce_ring *dest_ring;
+-      u32 nentries;
++      unsigned long nentries;
+       dma_addr_t base_addr;
+       nentries = roundup_pow_of_two(attr->dest_nentries);
+diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
+index 0c55cd9..7fc013b 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.h
++++ b/drivers/net/wireless/ath/ath10k/htc.h
+@@ -269,13 +269,13 @@ enum ath10k_htc_ep_id {
+ struct ath10k_htc_ops {
+       void (*target_send_suspend_complete)(struct ath10k *ar);
+-};
++} __no_const;
+ struct ath10k_htc_ep_ops {
+       void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+       void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+       void (*ep_tx_credits)(struct ath10k *);
+-};
++} __no_const;
+ /* service connection information */
+ struct ath10k_htc_svc_conn_req {
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 146365b..b0aef36 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -7991,8 +7991,11 @@ int ath10k_mac_register(struct ath10k *ar)
+        * supports the pull-push mechanism.
+        */
+       if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+-                    ar->running_fw->fw_file.fw_features))
+-              ar->ops->wake_tx_queue = NULL;
++                    ar->running_fw->fw_file.fw_features)) {
++              pax_open_kernel();
++              const_cast(ar->ops->wake_tx_queue) = NULL;
++              pax_close_kernel();
++      }
+       ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+                           ath10k_reg_notifier);
+diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
+index ac25f17..2cb440b 100644
+--- a/drivers/net/wireless/ath/ath6kl/core.h
++++ b/drivers/net/wireless/ath/ath6kl/core.h
+@@ -915,7 +915,7 @@ void ath6kl_tx_data_cleanup(struct ath6kl *ar);
+ struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
+ void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
+-int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
++netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
+ struct aggr_info *aggr_init(struct ath6kl_vif *vif);
+ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
+diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
+index 9df41d5..fb12f17 100644
+--- a/drivers/net/wireless/ath/ath6kl/txrx.c
++++ b/drivers/net/wireless/ath/ath6kl/txrx.c
+@@ -353,7 +353,7 @@ fail_ctrl_tx:
+       return status;
+ }
+-int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
++netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_cookie *cookie = NULL;
+diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
+index f68cb00..7e16ed6 100644
+--- a/drivers/net/wireless/ath/ath9k/Kconfig
++++ b/drivers/net/wireless/ath/ath9k/Kconfig
+@@ -3,7 +3,6 @@ config ATH9K_HW
+ config ATH9K_COMMON
+       tristate
+       select ATH_COMMON
+-      select DEBUG_FS
+       select RELAY
+ config ATH9K_DFS_DEBUGFS
+       def_bool y
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+index f816909..e56cd8b 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+       ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+-      ACCESS_ONCE(ads->ds_link) = i->link;
+-      ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
++      ACCESS_ONCE_RW(ads->ds_link) = i->link;
++      ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
+       ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+       ctl6 = SM(i->keytype, AR_EncrType);
+@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       if ((i->is_first || i->is_last) &&
+           i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+-              ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
+                       | set11nTries(i->rates, 1)
+                       | set11nTries(i->rates, 2)
+                       | set11nTries(i->rates, 3)
+                       | (i->dur_update ? AR_DurUpdateEna : 0)
+                       | SM(0, AR_BurstDur);
+-              ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
+                       | set11nRate(i->rates, 1)
+                       | set11nRate(i->rates, 2)
+                       | set11nRate(i->rates, 3);
+       } else {
+-              ACCESS_ONCE(ads->ds_ctl2) = 0;
+-              ACCESS_ONCE(ads->ds_ctl3) = 0;
++              ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
++              ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
+       }
+       if (!i->is_first) {
+-              ACCESS_ONCE(ads->ds_ctl0) = 0;
+-              ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+-              ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++              ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
++              ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++              ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+               return;
+       }
+@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+               break;
+       }
+-      ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
++      ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+               | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+               | SM(i->txpower[0], AR_XmitPower0)
+               | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+               | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+                  (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+-      ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+-      ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++      ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++      ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+       if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+               return;
+-      ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+               | set11nPktDurRTSCTS(i->rates, 1);
+-      ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
++      ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+               | set11nPktDurRTSCTS(i->rates, 3);
+-      ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+               | set11nRateFlags(i->rates, 1)
+               | set11nRateFlags(i->rates, 2)
+               | set11nRateFlags(i->rates, 3)
+               | SM(i->rtscts_rate, AR_RTSCTSRate);
+-      ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
+-      ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
+-      ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
++      ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
++      ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
++      ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
+ }
+ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+index da84b70..83e4978 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+             (i->qcu << AR_TxQcuNum_S) | desc_len;
+       checksum += val;
+-      ACCESS_ONCE(ads->info) = val;
++      ACCESS_ONCE_RW(ads->info) = val;
+       checksum += i->link;
+-      ACCESS_ONCE(ads->link) = i->link;
++      ACCESS_ONCE_RW(ads->link) = i->link;
+       checksum += i->buf_addr[0];
+-      ACCESS_ONCE(ads->data0) = i->buf_addr[0];
++      ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
+       checksum += i->buf_addr[1];
+-      ACCESS_ONCE(ads->data1) = i->buf_addr[1];
++      ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
+       checksum += i->buf_addr[2];
+-      ACCESS_ONCE(ads->data2) = i->buf_addr[2];
++      ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
+       checksum += i->buf_addr[3];
+-      ACCESS_ONCE(ads->data3) = i->buf_addr[3];
++      ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
+       checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl3) = val;
++      ACCESS_ONCE_RW(ads->ctl3) = val;
+       checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl5) = val;
++      ACCESS_ONCE_RW(ads->ctl5) = val;
+       checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl7) = val;
++      ACCESS_ONCE_RW(ads->ctl7) = val;
+       checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+-      ACCESS_ONCE(ads->ctl9) = val;
++      ACCESS_ONCE_RW(ads->ctl9) = val;
+       checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+-      ACCESS_ONCE(ads->ctl10) = checksum;
++      ACCESS_ONCE_RW(ads->ctl10) = checksum;
+       if (i->is_first || i->is_last) {
+-              ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
+                       | set11nTries(i->rates, 1)
+                       | set11nTries(i->rates, 2)
+                       | set11nTries(i->rates, 3)
+                       | (i->dur_update ? AR_DurUpdateEna : 0)
+                       | SM(0, AR_BurstDur);
+-              ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
++              ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
+                       | set11nRate(i->rates, 1)
+                       | set11nRate(i->rates, 2)
+                       | set11nRate(i->rates, 3);
+       } else {
+-              ACCESS_ONCE(ads->ctl13) = 0;
+-              ACCESS_ONCE(ads->ctl14) = 0;
++              ACCESS_ONCE_RW(ads->ctl13) = 0;
++              ACCESS_ONCE_RW(ads->ctl14) = 0;
+       }
+       ads->ctl20 = 0;
+@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       ctl17 = SM(i->keytype, AR_EncrType);
+       if (!i->is_first) {
+-              ACCESS_ONCE(ads->ctl11) = 0;
+-              ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+-              ACCESS_ONCE(ads->ctl15) = 0;
+-              ACCESS_ONCE(ads->ctl16) = 0;
+-              ACCESS_ONCE(ads->ctl17) = ctl17;
+-              ACCESS_ONCE(ads->ctl18) = 0;
+-              ACCESS_ONCE(ads->ctl19) = 0;
++              ACCESS_ONCE_RW(ads->ctl11) = 0;
++              ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
++              ACCESS_ONCE_RW(ads->ctl15) = 0;
++              ACCESS_ONCE_RW(ads->ctl16) = 0;
++              ACCESS_ONCE_RW(ads->ctl17) = ctl17;
++              ACCESS_ONCE_RW(ads->ctl18) = 0;
++              ACCESS_ONCE_RW(ads->ctl19) = 0;
+               return;
+       }
+-      ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
++      ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+               | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+               | SM(i->txpower[0], AR_XmitPower0)
+               | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+       val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+       ctl12 |= SM(val, AR_PAPRDChainMask);
+-      ACCESS_ONCE(ads->ctl12) = ctl12;
+-      ACCESS_ONCE(ads->ctl17) = ctl17;
++      ACCESS_ONCE_RW(ads->ctl12) = ctl12;
++      ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+-      ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+               | set11nPktDurRTSCTS(i->rates, 1);
+-      ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
++      ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+               | set11nPktDurRTSCTS(i->rates, 3);
+-      ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
++      ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
+               | set11nRateFlags(i->rates, 1)
+               | set11nRateFlags(i->rates, 2)
+               | set11nRateFlags(i->rates, 3)
+               | SM(i->rtscts_rate, AR_RTSCTSRate);
+-      ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
++      ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
+-      ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
+-      ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
+-      ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
++      ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
++      ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
++      ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
+ }
+ static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 2a5d3ad..59d9ad3 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -672,7 +672,7 @@ struct ath_hw_private_ops {
+ #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       bool (*is_aic_enabled)(struct ath_hw *ah);
+ #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+-};
++} __no_const;
+ /**
+  * struct ath_spec_scan - parameters for Atheros spectral scan
+@@ -748,7 +748,7 @@ struct ath_hw_ops {
+ #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
+ #endif
+-};
++} __no_const;
+ struct ath_nf_limits {
+       s16 max;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 7cb65c3..d213e2a 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2622,16 +2622,18 @@ void ath9k_fill_chanctx_ops(void)
+       if (!ath9k_is_chanctx_enabled())
+               return;
+-      ath9k_ops.hw_scan                  = ath9k_hw_scan;
+-      ath9k_ops.cancel_hw_scan           = ath9k_cancel_hw_scan;
+-      ath9k_ops.remain_on_channel        = ath9k_remain_on_channel;
+-      ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
+-      ath9k_ops.add_chanctx              = ath9k_add_chanctx;
+-      ath9k_ops.remove_chanctx           = ath9k_remove_chanctx;
+-      ath9k_ops.change_chanctx           = ath9k_change_chanctx;
+-      ath9k_ops.assign_vif_chanctx       = ath9k_assign_vif_chanctx;
+-      ath9k_ops.unassign_vif_chanctx     = ath9k_unassign_vif_chanctx;
+-      ath9k_ops.mgd_prepare_tx           = ath9k_mgd_prepare_tx;
++      pax_open_kernel();
++      const_cast(ath9k_ops.hw_scan)                  = ath9k_hw_scan;
++      const_cast(ath9k_ops.cancel_hw_scan)           = ath9k_cancel_hw_scan;
++      const_cast(ath9k_ops.remain_on_channel)        = ath9k_remain_on_channel;
++      const_cast(ath9k_ops.cancel_remain_on_channel) = ath9k_cancel_remain_on_channel;
++      const_cast(ath9k_ops.add_chanctx)              = ath9k_add_chanctx;
++      const_cast(ath9k_ops.remove_chanctx)           = ath9k_remove_chanctx;
++      const_cast(ath9k_ops.change_chanctx)           = ath9k_change_chanctx;
++      const_cast(ath9k_ops.assign_vif_chanctx)       = ath9k_assign_vif_chanctx;
++      const_cast(ath9k_ops.unassign_vif_chanctx)     = ath9k_unassign_vif_chanctx;
++      const_cast(ath9k_ops.mgd_prepare_tx)           = ath9k_mgd_prepare_tx;
++      pax_close_kernel();
+ }
+ #endif
+diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
+index 237d0cd..6c094fd 100644
+--- a/drivers/net/wireless/ath/carl9170/carl9170.h
++++ b/drivers/net/wireless/ath/carl9170/carl9170.h
+@@ -297,7 +297,7 @@ struct ar9170 {
+       unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
+       bool needs_full_reset;
+       bool force_usb_reset;
+-      atomic_t pending_restarts;
++      atomic_unchecked_t pending_restarts;
+       /* interface mode settings */
+       struct list_head vif_list;
+@@ -400,7 +400,7 @@ struct ar9170 {
+       struct carl9170_sta_tid __rcu *tx_ampdu_iter;
+       struct list_head tx_ampdu_list;
+       atomic_t tx_ampdu_upload;
+-      atomic_t tx_ampdu_scheduler;
++      atomic_unchecked_t tx_ampdu_scheduler;
+       atomic_t tx_total_pending;
+       atomic_t tx_total_queued;
+       unsigned int tx_ampdu_list_len;
+@@ -412,7 +412,7 @@ struct ar9170 {
+       spinlock_t mem_lock;
+       unsigned long *mem_bitmap;
+       atomic_t mem_free_blocks;
+-      atomic_t mem_allocs;
++      atomic_unchecked_t mem_allocs;
+       /* rxstream mpdu merge */
+       struct ar9170_rx_head rx_plcp;
+diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
+index ec3a64e..4d4a4e2 100644
+--- a/drivers/net/wireless/ath/carl9170/debug.c
++++ b/drivers/net/wireless/ath/carl9170/debug.c
+@@ -223,7 +223,7 @@ static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
+       ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
+           bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
+-          ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));
++          ar->fw.mem_blocks, atomic_read_unchecked(&ar->mem_allocs));
+       ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
+           atomic_read(&ar->mem_free_blocks),
+@@ -674,7 +674,7 @@ static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
+       ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
+               ar->fw.bug_counter);
+       ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
+-              atomic_read(&ar->pending_restarts));
++              atomic_read_unchecked(&ar->pending_restarts));
+       return buf;
+ }
+ __DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
+@@ -781,7 +781,7 @@ DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
+ DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
+                     atomic_read(&ar->tx_total_queued));
+ DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
+-                    atomic_read(&ar->tx_ampdu_scheduler));
++                    atomic_read_unchecked(&ar->tx_ampdu_scheduler));
+ DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
+                     atomic_read(&ar->tx_total_pending));
+diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
+index ffb22a0..231c7bc 100644
+--- a/drivers/net/wireless/ath/carl9170/main.c
++++ b/drivers/net/wireless/ath/carl9170/main.c
+@@ -320,7 +320,7 @@ static void carl9170_zap_queues(struct ar9170 *ar)
+       rcu_read_unlock();
+       atomic_set(&ar->tx_ampdu_upload, 0);
+-      atomic_set(&ar->tx_ampdu_scheduler, 0);
++      atomic_set_unchecked(&ar->tx_ampdu_scheduler, 0);
+       atomic_set(&ar->tx_total_pending, 0);
+       atomic_set(&ar->tx_total_queued, 0);
+       atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
+@@ -370,7 +370,7 @@ static int carl9170_op_start(struct ieee80211_hw *hw)
+               ar->max_queue_stop_timeout[i] = 0;
+       }
+-      atomic_set(&ar->mem_allocs, 0);
++      atomic_set_unchecked(&ar->mem_allocs, 0);
+       err = carl9170_usb_open(ar);
+       if (err)
+@@ -490,7 +490,7 @@ static void carl9170_restart_work(struct work_struct *work)
+       if (!err && !ar->force_usb_reset) {
+               ar->restart_counter++;
+-              atomic_set(&ar->pending_restarts, 0);
++              atomic_set_unchecked(&ar->pending_restarts, 0);
+               ieee80211_restart_hw(ar->hw);
+       } else {
+@@ -513,7 +513,7 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
+        * By ignoring these *surplus* reset events, the device won't be
+        * killed again, right after it has recovered.
+        */
+-      if (atomic_inc_return(&ar->pending_restarts) > 1) {
++      if (atomic_inc_return_unchecked(&ar->pending_restarts) > 1) {
+               dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
+               return;
+       }
+@@ -1820,7 +1820,7 @@ void *carl9170_alloc(size_t priv_size)
+       spin_lock_init(&ar->tx_ampdu_list_lock);
+       spin_lock_init(&ar->mem_lock);
+       spin_lock_init(&ar->state_lock);
+-      atomic_set(&ar->pending_restarts, 0);
++      atomic_set_unchecked(&ar->pending_restarts, 0);
+       ar->vifs = 0;
+       for (i = 0; i < ar->hw->queues; i++) {
+               skb_queue_head_init(&ar->tx_status[i]);
+diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
+index 2bf04c9..ae05957 100644
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -193,7 +193,7 @@ static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
+       unsigned int chunks;
+       int cookie = -1;
+-      atomic_inc(&ar->mem_allocs);
++      atomic_inc_unchecked(&ar->mem_allocs);
+       chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
+       if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
+@@ -1130,7 +1130,7 @@ static void carl9170_tx_ampdu(struct ar9170 *ar)
+       unsigned int i = 0, done_ampdus = 0;
+       u16 seq, queue, tmpssn;
+-      atomic_inc(&ar->tx_ampdu_scheduler);
++      atomic_inc_unchecked(&ar->tx_ampdu_scheduler);
+       ar->tx_ampdu_schedule = false;
+       if (atomic_read(&ar->tx_ampdu_upload))
+diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
+index 7b5c422..caa69fa 100644
+--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
++++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
+@@ -159,7 +159,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       struct wil6210_priv *wil;
+       struct device *dev = &pdev->dev;
+       int rc;
+-      const struct wil_platform_rops rops = {
++      static const struct wil_platform_rops rops = {
+               .ramdump = wil_platform_rop_ramdump,
+               .fw_recovery = wil_platform_rop_fw_recovery,
+       };
+diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
+index f8c4117..72c917e 100644
+--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
++++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
+@@ -37,7 +37,7 @@ struct wil_platform_ops {
+       int (*resume)(void *handle);
+       void (*uninit)(void *handle);
+       int (*notify)(void *handle, enum wil_platform_event evt);
+-};
++} __no_const;
+ /**
+  * struct wil_platform_rops - wil platform module callbacks from
+diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
+index 0e18067..1f7f9a2 100644
+--- a/drivers/net/wireless/atmel/at76c50x-usb.c
++++ b/drivers/net/wireless/atmel/at76c50x-usb.c
+@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
+ }
+ /* Convert timeout from the DFU status to jiffies */
+-static inline unsigned long at76_get_timeout(struct dfu_status *s)
++static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
+ {
+       return msecs_to_jiffies((s->poll_timeout[2] << 16)
+                               | (s->poll_timeout[1] << 8)
+diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
+index bf2e9a0..b55366e 100644
+--- a/drivers/net/wireless/atmel/atmel.c
++++ b/drivers/net/wireless/atmel/atmel.c
+@@ -1663,9 +1663,10 @@ EXPORT_SYMBOL(stop_atmel_card);
+ static int atmel_set_essid(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *dwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->essid;
+       struct atmel_private *priv = netdev_priv(dev);
+       /* Check if we asked for `any' */
+@@ -1691,9 +1692,10 @@ static int atmel_set_essid(struct net_device *dev,
+ static int atmel_get_essid(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *dwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->essid;
+       struct atmel_private *priv = netdev_priv(dev);
+       /* Get the current SSID */
+@@ -1712,9 +1714,10 @@ static int atmel_get_essid(struct net_device *dev,
+ static int atmel_get_wap(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct sockaddr *awrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct sockaddr *awrq = &wrqu->ap_addr;
+       struct atmel_private *priv = netdev_priv(dev);
+       memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
+       awrq->sa_family = ARPHRD_ETHER;
+@@ -1724,9 +1727,10 @@ static int atmel_get_wap(struct net_device *dev,
+ static int atmel_set_encode(struct net_device *dev,
+                           struct iw_request_info *info,
+-                          struct iw_point *dwrq,
++                          union iwreq_data *wrqu,
+                           char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->encoding;
+       struct atmel_private *priv = netdev_priv(dev);
+       /* Basic checking: do we have a key to set ?
+@@ -1813,9 +1817,10 @@ static int atmel_set_encode(struct net_device *dev,
+ static int atmel_get_encode(struct net_device *dev,
+                           struct iw_request_info *info,
+-                          struct iw_point *dwrq,
++                          union iwreq_data *wrqu,
+                           char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->encoding;
+       struct atmel_private *priv = netdev_priv(dev);
+       int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+@@ -2023,18 +2028,20 @@ static int atmel_get_auth(struct net_device *dev,
+ static int atmel_get_name(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        char *cwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      char *cwrq = wrqu->name;
+       strcpy(cwrq, "IEEE 802.11-DS");
+       return 0;
+ }
+ static int atmel_set_rate(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->bitrate;
+       struct atmel_private *priv = netdev_priv(dev);
+       if (vwrq->fixed == 0) {
+@@ -2073,9 +2080,10 @@ static int atmel_set_rate(struct net_device *dev,
+ static int atmel_set_mode(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        __u32 *uwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      __u32 *uwrq = &wrqu->mode;
+       struct atmel_private *priv = netdev_priv(dev);
+       if (*uwrq != IW_MODE_ADHOC && *uwrq != IW_MODE_INFRA)
+@@ -2087,9 +2095,10 @@ static int atmel_set_mode(struct net_device *dev,
+ static int atmel_get_mode(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        __u32 *uwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      __u32 *uwrq = &wrqu->mode;
+       struct atmel_private *priv = netdev_priv(dev);
+       *uwrq = priv->operating_mode;
+@@ -2098,9 +2107,10 @@ static int atmel_get_mode(struct net_device *dev,
+ static int atmel_get_rate(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->bitrate;
+       struct atmel_private *priv = netdev_priv(dev);
+       if (priv->auto_tx_rate) {
+@@ -2128,9 +2138,10 @@ static int atmel_get_rate(struct net_device *dev,
+ static int atmel_set_power(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_param *vwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->power;
+       struct atmel_private *priv = netdev_priv(dev);
+       priv->power_mode = vwrq->disabled ? 0 : 1;
+       return -EINPROGRESS;
+@@ -2138,9 +2149,10 @@ static int atmel_set_power(struct net_device *dev,
+ static int atmel_get_power(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_param *vwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->power;
+       struct atmel_private *priv = netdev_priv(dev);
+       vwrq->disabled = priv->power_mode ? 0 : 1;
+       vwrq->flags = IW_POWER_ON;
+@@ -2149,9 +2161,10 @@ static int atmel_get_power(struct net_device *dev,
+ static int atmel_set_retry(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_param *vwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->retry;
+       struct atmel_private *priv = netdev_priv(dev);
+       if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
+@@ -2172,9 +2185,10 @@ static int atmel_set_retry(struct net_device *dev,
+ static int atmel_get_retry(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_param *vwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->retry;
+       struct atmel_private *priv = netdev_priv(dev);
+       vwrq->disabled = 0;      /* Can't be disabled */
+@@ -2195,9 +2209,10 @@ static int atmel_get_retry(struct net_device *dev,
+ static int atmel_set_rts(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->rts;
+       struct atmel_private *priv = netdev_priv(dev);
+       int rthr = vwrq->value;
+@@ -2213,9 +2228,10 @@ static int atmel_set_rts(struct net_device *dev,
+ static int atmel_get_rts(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->rts;
+       struct atmel_private *priv = netdev_priv(dev);
+       vwrq->value = priv->rts_threshold;
+@@ -2227,9 +2243,10 @@ static int atmel_get_rts(struct net_device *dev,
+ static int atmel_set_frag(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->frag;
+       struct atmel_private *priv = netdev_priv(dev);
+       int fthr = vwrq->value;
+@@ -2246,9 +2263,10 @@ static int atmel_set_frag(struct net_device *dev,
+ static int atmel_get_frag(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->frag;
+       struct atmel_private *priv = netdev_priv(dev);
+       vwrq->value = priv->frag_threshold;
+@@ -2260,9 +2278,10 @@ static int atmel_get_frag(struct net_device *dev,
+ static int atmel_set_freq(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_freq *fwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_freq *fwrq = &wrqu->freq;
+       struct atmel_private *priv = netdev_priv(dev);
+       int rc = -EINPROGRESS;          /* Call commit handler */
+@@ -2290,9 +2309,10 @@ static int atmel_set_freq(struct net_device *dev,
+ static int atmel_get_freq(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_freq *fwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_freq *fwrq = &wrqu->freq;
+       struct atmel_private *priv = netdev_priv(dev);
+       fwrq->m = priv->channel;
+@@ -2302,7 +2322,7 @@ static int atmel_get_freq(struct net_device *dev,
+ static int atmel_set_scan(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_point *dwrq,
++                        union iwreq_data *dwrq,
+                         char *extra)
+ {
+       struct atmel_private *priv = netdev_priv(dev);
+@@ -2340,9 +2360,10 @@ static int atmel_set_scan(struct net_device *dev,
+ static int atmel_get_scan(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_point *dwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct atmel_private *priv = netdev_priv(dev);
+       int i;
+       char *current_ev = extra;
+@@ -2411,9 +2432,10 @@ static int atmel_get_scan(struct net_device *dev,
+ static int atmel_get_range(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *dwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct atmel_private *priv = netdev_priv(dev);
+       struct iw_range *range = (struct iw_range *) extra;
+       int k, i, j;
+@@ -2485,9 +2507,10 @@ static int atmel_get_range(struct net_device *dev,
+ static int atmel_set_wap(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct sockaddr *awrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct sockaddr *awrq = &wrqu->ap_addr;
+       struct atmel_private *priv = netdev_priv(dev);
+       int i;
+       static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+@@ -2527,7 +2550,7 @@ static int atmel_set_wap(struct net_device *dev,
+ static int atmel_config_commit(struct net_device *dev,
+                              struct iw_request_info *info,    /* NULL */
+-                             void *zwrq,                      /* NULL */
++                             union iwreq_data *zwrq,          /* NULL */
+                              char *extra)                     /* NULL */
+ {
+       return atmel_open(dev);
+@@ -2535,61 +2558,61 @@ static int atmel_config_commit(struct net_device *dev,
+ static const iw_handler atmel_handler[] =
+ {
+-      (iw_handler) atmel_config_commit,       /* SIOCSIWCOMMIT */
+-      (iw_handler) atmel_get_name,            /* SIOCGIWNAME */
+-      (iw_handler) NULL,                      /* SIOCSIWNWID */
+-      (iw_handler) NULL,                      /* SIOCGIWNWID */
+-      (iw_handler) atmel_set_freq,            /* SIOCSIWFREQ */
+-      (iw_handler) atmel_get_freq,            /* SIOCGIWFREQ */
+-      (iw_handler) atmel_set_mode,            /* SIOCSIWMODE */
+-      (iw_handler) atmel_get_mode,            /* SIOCGIWMODE */
+-      (iw_handler) NULL,                      /* SIOCSIWSENS */
+-      (iw_handler) NULL,                      /* SIOCGIWSENS */
+-      (iw_handler) NULL,                      /* SIOCSIWRANGE */
+-      (iw_handler) atmel_get_range,           /* SIOCGIWRANGE */
+-      (iw_handler) NULL,                      /* SIOCSIWPRIV */
+-      (iw_handler) NULL,                      /* SIOCGIWPRIV */
+-      (iw_handler) NULL,                      /* SIOCSIWSTATS */
+-      (iw_handler) NULL,                      /* SIOCGIWSTATS */
+-      (iw_handler) NULL,                      /* SIOCSIWSPY */
+-      (iw_handler) NULL,                      /* SIOCGIWSPY */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) atmel_set_wap,             /* SIOCSIWAP */
+-      (iw_handler) atmel_get_wap,             /* SIOCGIWAP */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* SIOCGIWAPLIST */
+-      (iw_handler) atmel_set_scan,            /* SIOCSIWSCAN */
+-      (iw_handler) atmel_get_scan,            /* SIOCGIWSCAN */
+-      (iw_handler) atmel_set_essid,           /* SIOCSIWESSID */
+-      (iw_handler) atmel_get_essid,           /* SIOCGIWESSID */
+-      (iw_handler) NULL,                      /* SIOCSIWNICKN */
+-      (iw_handler) NULL,                      /* SIOCGIWNICKN */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) atmel_set_rate,            /* SIOCSIWRATE */
+-      (iw_handler) atmel_get_rate,            /* SIOCGIWRATE */
+-      (iw_handler) atmel_set_rts,             /* SIOCSIWRTS */
+-      (iw_handler) atmel_get_rts,             /* SIOCGIWRTS */
+-      (iw_handler) atmel_set_frag,            /* SIOCSIWFRAG */
+-      (iw_handler) atmel_get_frag,            /* SIOCGIWFRAG */
+-      (iw_handler) NULL,                      /* SIOCSIWTXPOW */
+-      (iw_handler) NULL,                      /* SIOCGIWTXPOW */
+-      (iw_handler) atmel_set_retry,           /* SIOCSIWRETRY */
+-      (iw_handler) atmel_get_retry,           /* SIOCGIWRETRY */
+-      (iw_handler) atmel_set_encode,          /* SIOCSIWENCODE */
+-      (iw_handler) atmel_get_encode,          /* SIOCGIWENCODE */
+-      (iw_handler) atmel_set_power,           /* SIOCSIWPOWER */
+-      (iw_handler) atmel_get_power,           /* SIOCGIWPOWER */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* SIOCSIWGENIE */
+-      (iw_handler) NULL,                      /* SIOCGIWGENIE */
+-      (iw_handler) atmel_set_auth,            /* SIOCSIWAUTH */
+-      (iw_handler) atmel_get_auth,            /* SIOCGIWAUTH */
+-      (iw_handler) atmel_set_encodeext,       /* SIOCSIWENCODEEXT */
+-      (iw_handler) atmel_get_encodeext,       /* SIOCGIWENCODEEXT */
+-      (iw_handler) NULL,                      /* SIOCSIWPMKSA */
++      atmel_config_commit,    /* SIOCSIWCOMMIT */
++      atmel_get_name,         /* SIOCGIWNAME */
++      NULL,                   /* SIOCSIWNWID */
++      NULL,                   /* SIOCGIWNWID */
++      atmel_set_freq,         /* SIOCSIWFREQ */
++      atmel_get_freq,         /* SIOCGIWFREQ */
++      atmel_set_mode,         /* SIOCSIWMODE */
++      atmel_get_mode,         /* SIOCGIWMODE */
++      NULL,                   /* SIOCSIWSENS */
++      NULL,                   /* SIOCGIWSENS */
++      NULL,                   /* SIOCSIWRANGE */
++      atmel_get_range,           /* SIOCGIWRANGE */
++      NULL,                   /* SIOCSIWPRIV */
++      NULL,                   /* SIOCGIWPRIV */
++      NULL,                   /* SIOCSIWSTATS */
++      NULL,                   /* SIOCGIWSTATS */
++      NULL,                   /* SIOCSIWSPY */
++      NULL,                   /* SIOCGIWSPY */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      atmel_set_wap,          /* SIOCSIWAP */
++      atmel_get_wap,          /* SIOCGIWAP */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* SIOCGIWAPLIST */
++      atmel_set_scan,         /* SIOCSIWSCAN */
++      atmel_get_scan,         /* SIOCGIWSCAN */
++      atmel_set_essid,                /* SIOCSIWESSID */
++      atmel_get_essid,                /* SIOCGIWESSID */
++      NULL,                   /* SIOCSIWNICKN */
++      NULL,                   /* SIOCGIWNICKN */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      atmel_set_rate,         /* SIOCSIWRATE */
++      atmel_get_rate,         /* SIOCGIWRATE */
++      atmel_set_rts,          /* SIOCSIWRTS */
++      atmel_get_rts,          /* SIOCGIWRTS */
++      atmel_set_frag,         /* SIOCSIWFRAG */
++      atmel_get_frag,         /* SIOCGIWFRAG */
++      NULL,                   /* SIOCSIWTXPOW */
++      NULL,                   /* SIOCGIWTXPOW */
++      atmel_set_retry,                /* SIOCSIWRETRY */
++      atmel_get_retry,                /* SIOCGIWRETRY */
++      atmel_set_encode,               /* SIOCSIWENCODE */
++      atmel_get_encode,               /* SIOCGIWENCODE */
++      atmel_set_power,                /* SIOCSIWPOWER */
++      atmel_get_power,                /* SIOCGIWPOWER */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* SIOCSIWGENIE */
++      NULL,                   /* SIOCGIWGENIE */
++      atmel_set_auth,         /* SIOCSIWAUTH */
++      atmel_get_auth,         /* SIOCGIWAUTH */
++      atmel_set_encodeext,    /* SIOCSIWENCODEEXT */
++      atmel_get_encodeext,    /* SIOCGIWENCODEEXT */
++      NULL,                   /* SIOCSIWPMKSA */
+ };
+ static const iw_handler atmel_private_handler[] =
+diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
+index 6922cbb..c45026c 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
++++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
+@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
+ {
+       struct ssb_bus *bus = dev->dev->sdev->bus;
+-      static const struct b206x_channel *chandata = NULL;
++      const struct b206x_channel *chandata = NULL;
+       u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
+       u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
+       u16 old_comm15, scale;
+diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
+index 83770d2..3ec8a40 100644
+--- a/drivers/net/wireless/broadcom/b43legacy/main.c
++++ b/drivers/net/wireless/broadcom/b43legacy/main.c
+@@ -1304,8 +1304,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
+ }
+ /* Interrupt handler bottom-half */
+-static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
++static void b43legacy_interrupt_tasklet(unsigned long _dev)
+ {
++      struct b43legacy_wldev *dev = (struct b43legacy_wldev *)_dev;
+       u32 reason;
+       u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
+       u32 merged_dma_reason = 0;
+@@ -3775,7 +3776,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
+       b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
+       wldev->bad_frames_preempt = modparam_bad_frames_preempt;
+       tasklet_init(&wldev->isr_tasklet,
+-                   (void (*)(unsigned long))b43legacy_interrupt_tasklet,
++                   b43legacy_interrupt_tasklet,
+                    (unsigned long)wldev);
+       if (modparam_pio)
+               wldev->__using_pio = true;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index abaf003..7c0fe5d 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -5230,6 +5230,50 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
+       .tdls_oper = brcmf_cfg80211_tdls_oper,
+ };
++static struct cfg80211_ops brcmf_cfg80211_ops2 = {
++      .add_virtual_intf = brcmf_cfg80211_add_iface,
++      .del_virtual_intf = brcmf_cfg80211_del_iface,
++      .change_virtual_intf = brcmf_cfg80211_change_iface,
++      .scan = brcmf_cfg80211_scan,
++      .set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
++      .join_ibss = brcmf_cfg80211_join_ibss,
++      .leave_ibss = brcmf_cfg80211_leave_ibss,
++      .get_station = brcmf_cfg80211_get_station,
++      .dump_station = brcmf_cfg80211_dump_station,
++      .set_tx_power = brcmf_cfg80211_set_tx_power,
++      .get_tx_power = brcmf_cfg80211_get_tx_power,
++      .add_key = brcmf_cfg80211_add_key,
++      .del_key = brcmf_cfg80211_del_key,
++      .get_key = brcmf_cfg80211_get_key,
++      .set_default_key = brcmf_cfg80211_config_default_key,
++      .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
++      .set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
++      .connect = brcmf_cfg80211_connect,
++      .disconnect = brcmf_cfg80211_disconnect,
++      .suspend = brcmf_cfg80211_suspend,
++      .resume = brcmf_cfg80211_resume,
++      .set_pmksa = brcmf_cfg80211_set_pmksa,
++      .del_pmksa = brcmf_cfg80211_del_pmksa,
++      .flush_pmksa = brcmf_cfg80211_flush_pmksa,
++      .start_ap = brcmf_cfg80211_start_ap,
++      .stop_ap = brcmf_cfg80211_stop_ap,
++      .change_beacon = brcmf_cfg80211_change_beacon,
++      .del_station = brcmf_cfg80211_del_station,
++      .change_station = brcmf_cfg80211_change_station,
++      .sched_scan_start = brcmf_cfg80211_sched_scan_start,
++      .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
++      .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
++      .mgmt_tx = brcmf_cfg80211_mgmt_tx,
++      .remain_on_channel = brcmf_p2p_remain_on_channel,
++      .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
++      .start_p2p_device = brcmf_p2p_start_device,
++      .stop_p2p_device = brcmf_p2p_stop_device,
++      .crit_proto_start = brcmf_cfg80211_crit_proto_start,
++      .crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
++      .tdls_oper = brcmf_cfg80211_tdls_oper,
++      .set_rekey_data = brcmf_cfg80211_set_rekey_data,
++};
++
+ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+                                          enum nl80211_iftype type)
+ {
+@@ -6846,7 +6890,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+       struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev;
+       struct brcmf_cfg80211_info *cfg;
+       struct wiphy *wiphy;
+-      struct cfg80211_ops *ops;
++      struct cfg80211_ops *ops = &brcmf_cfg80211_ops;
+       struct brcmf_cfg80211_vif *vif;
+       struct brcmf_if *ifp;
+       s32 err = 0;
+@@ -6858,14 +6902,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+               return NULL;
+       }
+-      ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL);
+-      if (!ops)
+-              return NULL;
+-
+       ifp = netdev_priv(ndev);
+ #ifdef CONFIG_PM
+       if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
+-              ops->set_rekey_data = brcmf_cfg80211_set_rekey_data;
++              ops = &brcmf_cfg80211_ops2;
+ #endif
+       wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
+       if (!wiphy) {
+@@ -7004,7 +7044,6 @@ priv_out:
+       ifp->vif = NULL;
+ wiphy_out:
+       brcmf_free_wiphy(wiphy);
+-      kfree(ops);
+       return NULL;
+ }
+@@ -7015,7 +7054,6 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
+       brcmf_btcoex_detach(cfg);
+       wiphy_unregister(cfg->wiphy);
+-      kfree(cfg->ops);
+       wl_deinit_priv(cfg);
+       brcmf_free_wiphy(cfg->wiphy);
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+index 1c4e9dd..a6388e7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+@@ -394,8 +394,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
+       return sh;
+ }
+-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
++static void wlc_phy_timercb_phycal(void *_pi)
+ {
++      struct brcms_phy *pi = _pi;
+       uint delay = 5;
+       if (PHY_PERICAL_MPHASE_PENDING(pi)) {
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+index a0de5db..b723817 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
+ }
+ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+-                                   void (*fn)(struct brcms_phy *pi),
++                                   void (*fn)(void *pi),
+                                    void *arg, const char *name)
+ {
+       return (struct wlapi_timer *)
+-                      brcms_init_timer(physhim->wl, (void (*)(void *))fn,
+-                                       arg, name);
++                      brcms_init_timer(physhim->wl, fn, arg, name);
+ }
+ void wlapi_free_timer(struct wlapi_timer *t)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+index dd87747..27d0934 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+ /* PHY to WL utility functions */
+ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+-                                   void (*fn)(struct brcms_phy *pi),
++                                   void (*fn)(void *pi),
+                                    void *arg, const char *name);
+ void wlapi_free_timer(struct wlapi_timer *t);
+ void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
+index 69b826d..669a1e0 100644
+--- a/drivers/net/wireless/cisco/airo.c
++++ b/drivers/net/wireless/cisco/airo.c
+@@ -4779,7 +4779,7 @@ static int get_dec_u16( char *buffer, int *start, int limit ) {
+ }
+ static int airo_config_commit(struct net_device *dev,
+-                            struct iw_request_info *info, void *zwrq,
++                            struct iw_request_info *info, union iwreq_data *zwrq,
+                             char *extra);
+ static inline int sniffing_mode(struct airo_info *ai)
+@@ -5766,9 +5766,11 @@ static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
+  */
+ static int airo_get_name(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       char *cwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      char *cwrq = wrqu->name;
++
+       strcpy(cwrq, "IEEE 802.11-DS");
+       return 0;
+ }
+@@ -5779,9 +5781,10 @@ static int airo_get_name(struct net_device *dev,
+  */
+ static int airo_set_freq(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_freq *fwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_freq *fwrq = &wrqu->freq;
+       struct airo_info *local = dev->ml_priv;
+       int rc = -EINPROGRESS;          /* Call commit handler */
+@@ -5820,9 +5823,10 @@ static int airo_set_freq(struct net_device *dev,
+  */
+ static int airo_get_freq(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_freq *fwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_freq *fwrq = &wrqu->freq;
+       struct airo_info *local = dev->ml_priv;
+       StatusRid status_rid;           /* Card status info */
+       int ch;
+@@ -5852,9 +5856,10 @@ static int airo_get_freq(struct net_device *dev,
+  */
+ static int airo_set_essid(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_point *dwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->essid;
+       struct airo_info *local = dev->ml_priv;
+       SsidRid SSID_rid;               /* SSIDs */
+@@ -5897,9 +5902,10 @@ static int airo_set_essid(struct net_device *dev,
+  */
+ static int airo_get_essid(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_point *dwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->essid;
+       struct airo_info *local = dev->ml_priv;
+       StatusRid status_rid;           /* Card status info */
+@@ -5925,9 +5931,10 @@ static int airo_get_essid(struct net_device *dev,
+  */
+ static int airo_set_wap(struct net_device *dev,
+                       struct iw_request_info *info,
+-                      struct sockaddr *awrq,
++                      union iwreq_data *wrqu,
+                       char *extra)
+ {
++      struct sockaddr *awrq = &wrqu->ap_addr;
+       struct airo_info *local = dev->ml_priv;
+       Cmd cmd;
+       Resp rsp;
+@@ -5960,9 +5967,10 @@ static int airo_set_wap(struct net_device *dev,
+  */
+ static int airo_get_wap(struct net_device *dev,
+                       struct iw_request_info *info,
+-                      struct sockaddr *awrq,
++                      union iwreq_data *wrqu,
+                       char *extra)
+ {
++      struct sockaddr *awrq = &wrqu->ap_addr;
+       struct airo_info *local = dev->ml_priv;
+       StatusRid status_rid;           /* Card status info */
+@@ -5981,9 +5989,10 @@ static int airo_get_wap(struct net_device *dev,
+  */
+ static int airo_set_nick(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_point *dwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct airo_info *local = dev->ml_priv;
+       /* Check the size of the string */
+@@ -6004,9 +6013,10 @@ static int airo_set_nick(struct net_device *dev,
+  */
+ static int airo_get_nick(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_point *dwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -6023,9 +6033,10 @@ static int airo_get_nick(struct net_device *dev,
+  */
+ static int airo_set_rate(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->bitrate;
+       struct airo_info *local = dev->ml_priv;
+       CapabilityRid cap_rid;          /* Card capability info */
+       u8      brate = 0;
+@@ -6093,9 +6104,10 @@ static int airo_set_rate(struct net_device *dev,
+  */
+ static int airo_get_rate(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->bitrate;
+       struct airo_info *local = dev->ml_priv;
+       StatusRid status_rid;           /* Card status info */
+@@ -6115,9 +6127,10 @@ static int airo_get_rate(struct net_device *dev,
+  */
+ static int airo_set_rts(struct net_device *dev,
+                       struct iw_request_info *info,
+-                      struct iw_param *vwrq,
++                      union iwreq_data *wrqu,
+                       char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->rts;
+       struct airo_info *local = dev->ml_priv;
+       int rthr = vwrq->value;
+@@ -6139,9 +6152,10 @@ static int airo_set_rts(struct net_device *dev,
+  */
+ static int airo_get_rts(struct net_device *dev,
+                       struct iw_request_info *info,
+-                      struct iw_param *vwrq,
++                      union iwreq_data *wrqu,
+                       char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->rts;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -6158,9 +6172,10 @@ static int airo_get_rts(struct net_device *dev,
+  */
+ static int airo_set_frag(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->frag;
+       struct airo_info *local = dev->ml_priv;
+       int fthr = vwrq->value;
+@@ -6183,9 +6198,10 @@ static int airo_set_frag(struct net_device *dev,
+  */
+ static int airo_get_frag(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->frag;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -6202,9 +6218,10 @@ static int airo_get_frag(struct net_device *dev,
+  */
+ static int airo_set_mode(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       __u32 *uwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      __u32 *uwrq = &wrqu->mode;
+       struct airo_info *local = dev->ml_priv;
+       int reset = 0;
+@@ -6265,9 +6282,10 @@ static int airo_set_mode(struct net_device *dev,
+  */
+ static int airo_get_mode(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       __u32 *uwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      __u32 *uwrq = &wrqu->mode;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -6300,9 +6318,10 @@ static inline int valid_index(struct airo_info *ai, int index)
+  */
+ static int airo_set_encode(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *dwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->encoding;
+       struct airo_info *local = dev->ml_priv;
+       int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1);
+       __le16 currentAuthType = local->config.authType;
+@@ -6399,9 +6418,10 @@ static int airo_set_encode(struct net_device *dev,
+  */
+ static int airo_get_encode(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *dwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->encoding;
+       struct airo_info *local = dev->ml_priv;
+       int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+       int wep_key_len;
+@@ -6746,9 +6766,10 @@ static int airo_get_auth(struct net_device *dev,
+  */
+ static int airo_set_txpow(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->txpower;
+       struct airo_info *local = dev->ml_priv;
+       CapabilityRid cap_rid;          /* Card capability info */
+       int i;
+@@ -6783,9 +6804,10 @@ static int airo_set_txpow(struct net_device *dev,
+  */
+ static int airo_get_txpow(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->txpower;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -6803,9 +6825,10 @@ static int airo_get_txpow(struct net_device *dev,
+  */
+ static int airo_set_retry(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->retry;
+       struct airo_info *local = dev->ml_priv;
+       int rc = -EINVAL;
+@@ -6841,9 +6864,10 @@ static int airo_set_retry(struct net_device *dev,
+  */
+ static int airo_get_retry(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->retry;
+       struct airo_info *local = dev->ml_priv;
+       vwrq->disabled = 0;      /* Can't be disabled */
+@@ -6872,9 +6896,10 @@ static int airo_get_retry(struct net_device *dev,
+  */
+ static int airo_get_range(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_point *dwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct airo_info *local = dev->ml_priv;
+       struct iw_range *range = (struct iw_range *) extra;
+       CapabilityRid cap_rid;          /* Card capability info */
+@@ -6998,9 +7023,10 @@ static int airo_get_range(struct net_device *dev,
+  */
+ static int airo_set_power(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->power;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -7055,9 +7081,10 @@ static int airo_set_power(struct net_device *dev,
+  */
+ static int airo_get_power(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *vwrq,
++                        union iwreq_data *wrqu,
+                         char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->power;
+       struct airo_info *local = dev->ml_priv;
+       __le16 mode;
+@@ -7086,9 +7113,10 @@ static int airo_get_power(struct net_device *dev,
+  */
+ static int airo_set_sens(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->sens;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -7105,9 +7133,10 @@ static int airo_set_sens(struct net_device *dev,
+  */
+ static int airo_get_sens(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *vwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->sens;
+       struct airo_info *local = dev->ml_priv;
+       readConfigRid(local, 1);
+@@ -7125,9 +7154,10 @@ static int airo_get_sens(struct net_device *dev,
+  */
+ static int airo_get_aplist(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *dwrq,
++                         union iwreq_data *wrqu,
+                          char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct airo_info *local = dev->ml_priv;
+       struct sockaddr *address = (struct sockaddr *) extra;
+       struct iw_quality *qual;
+@@ -7203,7 +7233,7 @@ static int airo_get_aplist(struct net_device *dev,
+  */
+ static int airo_set_scan(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_point *dwrq,
++                       union iwreq_data *dwrq,
+                        char *extra)
+ {
+       struct airo_info *ai = dev->ml_priv;
+@@ -7434,9 +7464,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
+  */
+ static int airo_get_scan(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_point *dwrq,
++                       union iwreq_data *wrqu,
+                        char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct airo_info *ai = dev->ml_priv;
+       BSSListElement *net;
+       int err = 0;
+@@ -7478,7 +7509,7 @@ out:
+  */
+ static int airo_config_commit(struct net_device *dev,
+                             struct iw_request_info *info,     /* NULL */
+-                            void *zwrq,                       /* NULL */
++                            union iwreq_data *zwrq,           /* NULL */
+                             char *extra)                      /* NULL */
+ {
+       struct airo_info *local = dev->ml_priv;
+@@ -7528,61 +7559,61 @@ static const struct iw_priv_args airo_private_args[] = {
+ static const iw_handler               airo_handler[] =
+ {
+-      (iw_handler) airo_config_commit,        /* SIOCSIWCOMMIT */
+-      (iw_handler) airo_get_name,             /* SIOCGIWNAME */
+-      (iw_handler) NULL,                      /* SIOCSIWNWID */
+-      (iw_handler) NULL,                      /* SIOCGIWNWID */
+-      (iw_handler) airo_set_freq,             /* SIOCSIWFREQ */
+-      (iw_handler) airo_get_freq,             /* SIOCGIWFREQ */
+-      (iw_handler) airo_set_mode,             /* SIOCSIWMODE */
+-      (iw_handler) airo_get_mode,             /* SIOCGIWMODE */
+-      (iw_handler) airo_set_sens,             /* SIOCSIWSENS */
+-      (iw_handler) airo_get_sens,             /* SIOCGIWSENS */
+-      (iw_handler) NULL,                      /* SIOCSIWRANGE */
+-      (iw_handler) airo_get_range,            /* SIOCGIWRANGE */
+-      (iw_handler) NULL,                      /* SIOCSIWPRIV */
+-      (iw_handler) NULL,                      /* SIOCGIWPRIV */
+-      (iw_handler) NULL,                      /* SIOCSIWSTATS */
+-      (iw_handler) NULL,                      /* SIOCGIWSTATS */
++      airo_config_commit,     /* SIOCSIWCOMMIT */
++      airo_get_name,          /* SIOCGIWNAME */
++      NULL,                   /* SIOCSIWNWID */
++      NULL,                   /* SIOCGIWNWID */
++      airo_set_freq,          /* SIOCSIWFREQ */
++      airo_get_freq,          /* SIOCGIWFREQ */
++      airo_set_mode,          /* SIOCSIWMODE */
++      airo_get_mode,          /* SIOCGIWMODE */
++      airo_set_sens,          /* SIOCSIWSENS */
++      airo_get_sens,          /* SIOCGIWSENS */
++      NULL,                   /* SIOCSIWRANGE */
++      airo_get_range,         /* SIOCGIWRANGE */
++      NULL,                   /* SIOCSIWPRIV */
++      NULL,                   /* SIOCGIWPRIV */
++      NULL,                   /* SIOCSIWSTATS */
++      NULL,                   /* SIOCGIWSTATS */
+       iw_handler_set_spy,                     /* SIOCSIWSPY */
+       iw_handler_get_spy,                     /* SIOCGIWSPY */
+       iw_handler_set_thrspy,                  /* SIOCSIWTHRSPY */
+       iw_handler_get_thrspy,                  /* SIOCGIWTHRSPY */
+-      (iw_handler) airo_set_wap,              /* SIOCSIWAP */
+-      (iw_handler) airo_get_wap,              /* SIOCGIWAP */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) airo_get_aplist,           /* SIOCGIWAPLIST */
+-      (iw_handler) airo_set_scan,             /* SIOCSIWSCAN */
+-      (iw_handler) airo_get_scan,             /* SIOCGIWSCAN */
+-      (iw_handler) airo_set_essid,            /* SIOCSIWESSID */
+-      (iw_handler) airo_get_essid,            /* SIOCGIWESSID */
+-      (iw_handler) airo_set_nick,             /* SIOCSIWNICKN */
+-      (iw_handler) airo_get_nick,             /* SIOCGIWNICKN */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) airo_set_rate,             /* SIOCSIWRATE */
+-      (iw_handler) airo_get_rate,             /* SIOCGIWRATE */
+-      (iw_handler) airo_set_rts,              /* SIOCSIWRTS */
+-      (iw_handler) airo_get_rts,              /* SIOCGIWRTS */
+-      (iw_handler) airo_set_frag,             /* SIOCSIWFRAG */
+-      (iw_handler) airo_get_frag,             /* SIOCGIWFRAG */
+-      (iw_handler) airo_set_txpow,            /* SIOCSIWTXPOW */
+-      (iw_handler) airo_get_txpow,            /* SIOCGIWTXPOW */
+-      (iw_handler) airo_set_retry,            /* SIOCSIWRETRY */
+-      (iw_handler) airo_get_retry,            /* SIOCGIWRETRY */
+-      (iw_handler) airo_set_encode,           /* SIOCSIWENCODE */
+-      (iw_handler) airo_get_encode,           /* SIOCGIWENCODE */
+-      (iw_handler) airo_set_power,            /* SIOCSIWPOWER */
+-      (iw_handler) airo_get_power,            /* SIOCGIWPOWER */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* SIOCSIWGENIE */
+-      (iw_handler) NULL,                      /* SIOCGIWGENIE */
+-      (iw_handler) airo_set_auth,             /* SIOCSIWAUTH */
+-      (iw_handler) airo_get_auth,             /* SIOCGIWAUTH */
+-      (iw_handler) airo_set_encodeext,        /* SIOCSIWENCODEEXT */
+-      (iw_handler) airo_get_encodeext,        /* SIOCGIWENCODEEXT */
+-      (iw_handler) NULL,                      /* SIOCSIWPMKSA */
++      airo_set_wap,           /* SIOCSIWAP */
++      airo_get_wap,           /* SIOCGIWAP */
++      NULL,                   /* -- hole -- */
++      airo_get_aplist,                /* SIOCGIWAPLIST */
++      airo_set_scan,          /* SIOCSIWSCAN */
++      airo_get_scan,          /* SIOCGIWSCAN */
++      airo_set_essid,         /* SIOCSIWESSID */
++      airo_get_essid,         /* SIOCGIWESSID */
++      airo_set_nick,          /* SIOCSIWNICKN */
++      airo_get_nick,          /* SIOCGIWNICKN */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      airo_set_rate,          /* SIOCSIWRATE */
++      airo_get_rate,          /* SIOCGIWRATE */
++      airo_set_rts,           /* SIOCSIWRTS */
++      airo_get_rts,           /* SIOCGIWRTS */
++      airo_set_frag,          /* SIOCSIWFRAG */
++      airo_get_frag,          /* SIOCGIWFRAG */
++      airo_set_txpow,         /* SIOCSIWTXPOW */
++      airo_get_txpow,         /* SIOCGIWTXPOW */
++      airo_set_retry,         /* SIOCSIWRETRY */
++      airo_get_retry,         /* SIOCGIWRETRY */
++      airo_set_encode,                /* SIOCSIWENCODE */
++      airo_get_encode,                /* SIOCGIWENCODE */
++      airo_set_power,         /* SIOCSIWPOWER */
++      airo_get_power,         /* SIOCGIWPOWER */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* SIOCSIWGENIE */
++      NULL,                   /* SIOCGIWGENIE */
++      airo_set_auth,          /* SIOCSIWAUTH */
++      airo_get_auth,          /* SIOCGIWAUTH */
++      airo_set_encodeext,     /* SIOCSIWENCODEEXT */
++      airo_get_encodeext,     /* SIOCGIWENCODEEXT */
++      NULL,                   /* SIOCSIWPMKSA */
+ };
+ /* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
+@@ -7845,7 +7876,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
+       struct airo_info *ai = dev->ml_priv;
+       int  ridcode;
+         int  enabled;
+-      static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
++      int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
+       unsigned char *iobuf;
+       /* Only super-user can write RIDs */
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+index bfa542c..c2488f7 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+@@ -3220,8 +3220,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
+       }
+ }
+-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
++static void ipw2100_irq_tasklet(unsigned long _priv)
+ {
++      struct ipw2100_priv *priv = (struct ipw2100_priv *)_priv;
+       struct net_device *dev = priv->net_dev;
+       unsigned long flags;
+       u32 inta, tmp;
+@@ -6029,7 +6030,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
+       spin_unlock_irqrestore(&priv->low_lock, flags);
+ }
+-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
++static void ipw2100_irq_tasklet(unsigned long _priv);
+ static const struct net_device_ops ipw2100_netdev_ops = {
+       .ndo_open               = ipw2100_open,
+@@ -6158,8 +6159,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
+       INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
+       INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
+-      tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+-                   ipw2100_irq_tasklet, (unsigned long)priv);
++      tasklet_init(&priv->irq_tasklet, ipw2100_irq_tasklet, (unsigned long)priv);
+       /* NOTE:  We do not start the deferred work for status checks yet */
+       priv->stop_rf_kill = 1;
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+index bfd6861..d09fb09 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+@@ -1968,8 +1968,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
+       wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+ }
+-static void ipw_irq_tasklet(struct ipw_priv *priv)
++static void ipw_irq_tasklet(unsigned long _priv)
+ {
++      struct ipw_priv *priv = (struct ipw_priv *)_priv;
+       u32 inta, inta_mask, handled = 0;
+       unsigned long flags;
+       int rc = 0;
+@@ -10705,8 +10706,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
+       INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
+ #endif                                /* CONFIG_IPW2200_QOS */
+-      tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+-                   ipw_irq_tasklet, (unsigned long)priv);
++      tasklet_init(&priv->irq_tasklet, ipw_irq_tasklet, (unsigned long)priv);
+       return ret;
+ }
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+index 466912e..a59ae61 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+@@ -1399,8 +1399,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
+ }
+ static void
+-il3945_irq_tasklet(struct il_priv *il)
++il3945_irq_tasklet(unsigned long _il)
+ {
++      struct il_priv *il = (struct il_priv *)_il;
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+@@ -3432,7 +3433,7 @@ il3945_setup_deferred_work(struct il_priv *il)
+       setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
+       tasklet_init(&il->irq_tasklet,
+-                   (void (*)(unsigned long))il3945_irq_tasklet,
++                   il3945_irq_tasklet,
+                    (unsigned long)il);
+ }
+@@ -3469,7 +3470,7 @@ static struct attribute_group il3945_attribute_group = {
+       .attrs = il3945_sysfs_entries,
+ };
+-static struct ieee80211_ops il3945_mac_ops __read_mostly = {
++static struct ieee80211_ops il3945_mac_ops = {
+       .tx = il3945_mac_tx,
+       .start = il3945_mac_start,
+       .stop = il3945_mac_stop,
+@@ -3633,7 +3634,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+        */
+       if (il3945_mod_params.disable_hw_scan) {
+               D_INFO("Disabling hw_scan\n");
+-              il3945_mac_ops.hw_scan = NULL;
++              pax_open_kernel();
++              const_cast(il3945_mac_ops.hw_scan) = NULL;
++              pax_close_kernel();
+       }
+       D_INFO("*** LOAD DRIVER ***\n");
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+index a91d170..4b3876a 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+@@ -4361,8 +4361,9 @@ il4965_synchronize_irq(struct il_priv *il)
+ }
+ static void
+-il4965_irq_tasklet(struct il_priv *il)
++il4965_irq_tasklet(unsigned long _il)
+ {
++      struct il_priv *il = (struct il_priv *)_il;
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+@@ -6259,9 +6260,7 @@ il4965_setup_deferred_work(struct il_priv *il)
+       setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
+-      tasklet_init(&il->irq_tasklet,
+-                   (void (*)(unsigned long))il4965_irq_tasklet,
+-                   (unsigned long)il);
++      tasklet_init(&il->irq_tasklet, il4965_irq_tasklet, (unsigned long)il);
+ }
+ static void
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+index f6591c8..363b5b3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+@@ -190,7 +190,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[64];
+-      int buf_size;
++      size_t buf_size;
+       u32 offset, len;
+       memset(buf, 0, sizeof(buf));
+@@ -456,7 +456,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       u32 reset_flag;
+       memset(buf, 0, sizeof(buf));
+@@ -537,7 +537,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int ht40;
+       memset(buf, 0, sizeof(buf));
+@@ -589,7 +589,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int value;
+       memset(buf, 0, sizeof(buf));
+@@ -681,10 +681,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
+ DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
+ DEBUGFS_READ_FILE_OPS(current_sleep_command);
+-static const char *fmt_value = "  %-30s %10u\n";
+-static const char *fmt_hex   = "  %-30s       0x%02X\n";
+-static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+-static const char *fmt_header =
++static const char fmt_value[] = "  %-30s %10u\n";
++static const char fmt_hex[]   = "  %-30s       0x%02X\n";
++static const char fmt_table[] = "  %-30s %10u  %10u  %10u  %10u\n";
++static const char fmt_header[] =
+       "%-32s    current  cumulative       delta         max\n";
+ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+@@ -1854,7 +1854,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int clear;
+       memset(buf, 0, sizeof(buf));
+@@ -1899,7 +1899,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int trace;
+       memset(buf, 0, sizeof(buf));
+@@ -1970,7 +1970,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int missed;
+       memset(buf, 0, sizeof(buf));
+@@ -2011,7 +2011,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int plcp;
+       memset(buf, 0, sizeof(buf));
+@@ -2071,7 +2071,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int flush;
+       memset(buf, 0, sizeof(buf));
+@@ -2161,7 +2161,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int rts;
+       if (!priv->cfg->ht_params)
+@@ -2202,7 +2202,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
+ {
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+@@ -2236,7 +2236,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       u32 event_log_flag;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       /* check that the interface is up */
+       if (!iwl_is_ready(priv))
+@@ -2290,7 +2290,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       u32 calib_disabled;
+-      int buf_size;
++      size_t buf_size;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+index 6c2d6da..4660f39 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+@@ -933,7 +933,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+                       rx_p1ks = data->tkip->rx_uni;
+-                      pn64 = atomic64_read(&key->tx_pn);
++                      pn64 = atomic64_read_unchecked(&key->tx_pn);
+                       tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+                       tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+@@ -986,7 +986,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+                       aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+-                      pn64 = atomic64_read(&key->tx_pn);
++                      pn64 = atomic64_read_unchecked(&key->tx_pn);
+                       aes_tx_sc->pn = cpu_to_le64(pn64);
+               } else
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 4fdc3da..4f63dd9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -258,7 +258,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+                       rx_p1ks = data->tkip->rx_uni;
+-                      pn64 = atomic64_read(&key->tx_pn);
++                      pn64 = atomic64_read_unchecked(&key->tx_pn);
+                       tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+                       tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+@@ -313,7 +313,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+                       aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+-                      pn64 = atomic64_read(&key->tx_pn);
++                      pn64 = atomic64_read_unchecked(&key->tx_pn);
+                       aes_tx_sc->pn = cpu_to_le64(pn64);
+               } else {
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+@@ -1610,12 +1610,12 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
+               case WLAN_CIPHER_SUITE_CCMP:
+                       iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
+                                              sta, key);
+-                      atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
++                      atomic64_set_unchecked(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+                       iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+-                      atomic64_set(&key->tx_pn,
++                      atomic64_set_unchecked(&key->tx_pn,
+                                    (u64)seq.tkip.iv16 |
+                                    ((u64)seq.tkip.iv32 << 16));
+                       break;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index a0c1e3d..a3c2b98 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -385,7 +385,7 @@ static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+       u64 pn;
+-      pn = atomic64_inc_return(&keyconf->tx_pn);
++      pn = atomic64_inc_return_unchecked(&keyconf->tx_pn);
+       crypto_hdr[0] = pn;
+       crypto_hdr[2] = 0;
+       crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+@@ -418,7 +418,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+       case WLAN_CIPHER_SUITE_TKIP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+-              pn = atomic64_inc_return(&keyconf->tx_pn);
++              pn = atomic64_inc_return_unchecked(&keyconf->tx_pn);
+               ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
+               ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+               break;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 74f2f03..8436ddc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2346,7 +2346,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+       struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       u32 reset_flag;
+       memset(buf, 0, sizeof(buf));
+@@ -2367,7 +2367,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ {
+       struct iwl_trans *trans = file->private_data;
+       char buf[8];
+-      int buf_size;
++      size_t buf_size;
+       int csr;
+       memset(buf, 0, sizeof(buf));
+diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+index 3e5fa78..6d26beb 100644
+--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+@@ -101,8 +101,9 @@ static int prism2_get_datarates(struct net_device *dev, u8 *rates)
+ static int prism2_get_name(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         char *name, char *extra)
++                         union iwreq_data *wrqu, char *extra)
+ {
++      char *name = wrqu->name;
+       u8 rates[10];
+       int len, i, over2 = 0;
+@@ -123,8 +124,9 @@ static int prism2_get_name(struct net_device *dev,
+ static int prism2_ioctl_siwencode(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *erq, char *keybuf)
++                                union iwreq_data *wrqu, char *keybuf)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       int i;
+@@ -225,8 +227,9 @@ static int prism2_ioctl_siwencode(struct net_device *dev,
+ static int prism2_ioctl_giwencode(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *erq, char *key)
++                                union iwreq_data *wrqu, char *key)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       int i, len;
+@@ -331,8 +334,9 @@ static int hostap_set_rate(struct net_device *dev)
+ static int prism2_ioctl_siwrate(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *rrq, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -391,8 +395,9 @@ static int prism2_ioctl_siwrate(struct net_device *dev,
+ static int prism2_ioctl_giwrate(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *rrq, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       u16 val;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -450,8 +455,9 @@ static int prism2_ioctl_giwrate(struct net_device *dev,
+ static int prism2_ioctl_siwsens(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *sens, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *sens = &wrqu->sens;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -471,8 +477,9 @@ static int prism2_ioctl_siwsens(struct net_device *dev,
+ static int prism2_ioctl_giwsens(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *sens, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *sens = &wrqu->sens;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 val;
+@@ -495,8 +502,9 @@ static int prism2_ioctl_giwsens(struct net_device *dev,
+ /* Deprecated in new wireless extension API */
+ static int prism2_ioctl_giwaplist(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *data, char *extra)
++                                union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       struct sockaddr *addr;
+@@ -536,8 +544,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
+ static int prism2_ioctl_siwrts(struct net_device *dev,
+                              struct iw_request_info *info,
+-                             struct iw_param *rts, char *extra)
++                             union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rts = &wrqu->rts;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 val;
+@@ -563,8 +572,9 @@ static int prism2_ioctl_siwrts(struct net_device *dev,
+ static int prism2_ioctl_giwrts(struct net_device *dev,
+                              struct iw_request_info *info,
+-                             struct iw_param *rts, char *extra)
++                             union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rts = &wrqu->rts;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 val;
+@@ -586,8 +596,9 @@ static int prism2_ioctl_giwrts(struct net_device *dev,
+ static int prism2_ioctl_siwfrag(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *rts, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rts = &wrqu->rts;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 val;
+@@ -613,8 +624,9 @@ static int prism2_ioctl_siwfrag(struct net_device *dev,
+ static int prism2_ioctl_giwfrag(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *rts, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rts = &wrqu->rts;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 val;
+@@ -679,11 +691,12 @@ static int hostap_join_ap(struct net_device *dev)
+ static int prism2_ioctl_siwap(struct net_device *dev,
+                             struct iw_request_info *info,
+-                            struct sockaddr *ap_addr, char *extra)
++                            union iwreq_data *wrqu, char *extra)
+ {
+ #ifdef PRISM2_NO_STATION_MODES
+       return -EOPNOTSUPP;
+ #else /* PRISM2_NO_STATION_MODES */
++      struct sockaddr *ap_addr = &wrqu->ap_addr;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -719,8 +732,9 @@ static int prism2_ioctl_siwap(struct net_device *dev,
+ static int prism2_ioctl_giwap(struct net_device *dev,
+                             struct iw_request_info *info,
+-                            struct sockaddr *ap_addr, char *extra)
++                            union iwreq_data *wrqu, char *extra)
+ {
++      struct sockaddr *ap_addr = &wrqu->ap_addr;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -755,8 +769,9 @@ static int prism2_ioctl_giwap(struct net_device *dev,
+ static int prism2_ioctl_siwnickn(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *nickname)
++                               union iwreq_data *wrqu, char *nickname)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -776,8 +791,9 @@ static int prism2_ioctl_siwnickn(struct net_device *dev,
+ static int prism2_ioctl_giwnickn(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *nickname)
++                               union iwreq_data *wrqu, char *nickname)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       int len;
+@@ -803,8 +819,9 @@ static int prism2_ioctl_giwnickn(struct net_device *dev,
+ static int prism2_ioctl_siwfreq(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_freq *freq, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_freq *freq = &wrqu->freq;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -840,8 +857,9 @@ static int prism2_ioctl_siwfreq(struct net_device *dev,
+ static int prism2_ioctl_giwfreq(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_freq *freq, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_freq *freq = &wrqu->freq;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       u16 val;
+@@ -884,8 +902,9 @@ static void hostap_monitor_set_type(local_info_t *local)
+ static int prism2_ioctl_siwessid(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *ssid)
++                               union iwreq_data *wrqu, char *ssid)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -920,8 +939,9 @@ static int prism2_ioctl_siwessid(struct net_device *dev,
+ static int prism2_ioctl_giwessid(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *essid)
++                               union iwreq_data *wrqu, char *essid)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       u16 val;
+@@ -956,8 +976,9 @@ static int prism2_ioctl_giwessid(struct net_device *dev,
+ static int prism2_ioctl_giwrange(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       struct iw_range *range = (struct iw_range *) extra;
+@@ -1131,8 +1152,9 @@ static int hostap_monitor_mode_disable(local_info_t *local)
+ static int prism2_ioctl_siwmode(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              __u32 *mode, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      __u32 *mode = &wrqu->mode;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       int double_reset = 0;
+@@ -1207,8 +1229,9 @@ static int prism2_ioctl_siwmode(struct net_device *dev,
+ static int prism2_ioctl_giwmode(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              __u32 *mode, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      __u32 *mode = &wrqu->mode;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -1232,11 +1255,12 @@ static int prism2_ioctl_giwmode(struct net_device *dev,
+ static int prism2_ioctl_siwpower(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *wrq, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
+ #ifdef PRISM2_NO_STATION_MODES
+       return -EOPNOTSUPP;
+ #else /* PRISM2_NO_STATION_MODES */
++      struct iw_param *wrq = &wrqu->power;
+       int ret = 0;
+       if (wrq->disabled)
+@@ -1291,11 +1315,12 @@ static int prism2_ioctl_siwpower(struct net_device *dev,
+ static int prism2_ioctl_giwpower(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
+ #ifdef PRISM2_NO_STATION_MODES
+       return -EOPNOTSUPP;
+ #else /* PRISM2_NO_STATION_MODES */
++      struct iw_param *rrq = &wrqu->power;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 enable, mcast;
+@@ -1349,8 +1374,9 @@ static int prism2_ioctl_giwpower(struct net_device *dev,
+ static int prism2_ioctl_siwretry(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -1410,8 +1436,9 @@ static int prism2_ioctl_siwretry(struct net_device *dev,
+ static int prism2_ioctl_giwretry(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       __le16 shortretry, longretry, lifetime, altretry;
+@@ -1504,8 +1531,9 @@ static u16 prism2_txpower_dBm_to_hfa386x(int val)
+ static int prism2_ioctl_siwtxpow(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct hostap_interface *iface;
+       local_info_t *local;
+ #ifdef RAW_TXPOWER_SETTING
+@@ -1585,9 +1613,10 @@ static int prism2_ioctl_siwtxpow(struct net_device *dev,
+ static int prism2_ioctl_giwtxpow(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
+ #ifdef RAW_TXPOWER_SETTING
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       u16 resp0;
+@@ -1720,8 +1749,9 @@ static inline int prism2_request_scan(struct net_device *dev)
+ static int prism2_ioctl_siwscan(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_point *data, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       int ret;
+@@ -2068,8 +2098,9 @@ static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
+ static int prism2_ioctl_giwscan(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_point *data, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface;
+       local_info_t *local;
+       int res;
+@@ -2314,7 +2345,7 @@ static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i)
+ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
+                                         struct iw_request_info *info,
+-                                        void *wrqu, char *extra)
++                                        union iwreq_data *wrqu, char *extra)
+ {
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -2665,7 +2696,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
+ static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
+                                             struct iw_request_info *info,
+-                                            void *wrqu, char *extra)
++                                            union iwreq_data *wrqu, char *extra)
+ {
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -2852,7 +2883,7 @@ static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
+ static int prism2_ioctl_priv_readmif(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   void *wrqu, char *extra)
++                                   union iwreq_data *wrqu, char *extra)
+ {
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -2873,7 +2904,7 @@ static int prism2_ioctl_priv_readmif(struct net_device *dev,
+ static int prism2_ioctl_priv_writemif(struct net_device *dev,
+                                     struct iw_request_info *info,
+-                                    void *wrqu, char *extra)
++                                    union iwreq_data *wrqu, char *extra)
+ {
+       struct hostap_interface *iface;
+       local_info_t *local;
+@@ -2911,7 +2942,7 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
+               /* Disable monitor mode - old mode was not saved, so go to
+                * Master mode */
+               mode = IW_MODE_MASTER;
+-              ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
++              ret = prism2_ioctl_siwmode(dev, NULL, (union iwreq_data *)&mode, NULL);
+       } else if (*i == 1) {
+               /* netlink socket mode is not supported anymore since it did
+                * not separate different devices from each other and was not
+@@ -2928,7 +2959,7 @@ static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
+                       break;
+               }
+               mode = IW_MODE_MONITOR;
+-              ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
++              ret = prism2_ioctl_siwmode(dev, NULL, (union iwreq_data *)&mode, NULL);
+               hostap_monitor_mode_enable(local);
+       } else
+               ret = -EINVAL;
+@@ -3094,8 +3125,9 @@ static int prism2_set_genericelement(struct net_device *dev, u8 *elem,
+ static int prism2_ioctl_siwauth(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *data, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *data = &wrqu->param;
+       struct hostap_interface *iface = netdev_priv(dev);
+       local_info_t *local = iface->local;
+@@ -3160,8 +3192,9 @@ static int prism2_ioctl_siwauth(struct net_device *dev,
+ static int prism2_ioctl_giwauth(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_param *data, char *extra)
++                              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *data = &wrqu->param;
+       struct hostap_interface *iface = netdev_priv(dev);
+       local_info_t *local = iface->local;
+@@ -3199,8 +3232,9 @@ static int prism2_ioctl_giwauth(struct net_device *dev,
+ static int prism2_ioctl_siwencodeext(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   struct iw_point *erq, char *extra)
++                                   union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct hostap_interface *iface = netdev_priv(dev);
+       local_info_t *local = iface->local;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
+@@ -3373,8 +3407,9 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
+ static int prism2_ioctl_giwencodeext(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   struct iw_point *erq, char *extra)
++                                   union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct hostap_interface *iface = netdev_priv(dev);
+       local_info_t *local = iface->local;
+       struct lib80211_crypt_data **crypt;
+@@ -3681,16 +3716,19 @@ static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
+ static int prism2_ioctl_siwgenie(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
++
+       return prism2_set_genericelement(dev, extra, data->length);
+ }
+ static int prism2_ioctl_giwgenie(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_point *data, char *extra)
++                               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct hostap_interface *iface = netdev_priv(dev);
+       local_info_t *local = iface->local;
+       int len = local->generic_elem_len - 2;
+@@ -3728,7 +3766,7 @@ static int prism2_ioctl_set_generic_element(local_info_t *local,
+ static int prism2_ioctl_siwmlme(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_point *data, char *extra)
++                              union iwreq_data *data, char *extra)
+ {
+       struct hostap_interface *iface = netdev_priv(dev);
+       local_info_t *local = iface->local;
+@@ -3883,70 +3921,70 @@ const struct ethtool_ops prism2_ethtool_ops = {
+ static const iw_handler prism2_handler[] =
+ {
+-      (iw_handler) NULL,                              /* SIOCSIWCOMMIT */
+-      (iw_handler) prism2_get_name,                   /* SIOCGIWNAME */
+-      (iw_handler) NULL,                              /* SIOCSIWNWID */
+-      (iw_handler) NULL,                              /* SIOCGIWNWID */
+-      (iw_handler) prism2_ioctl_siwfreq,              /* SIOCSIWFREQ */
+-      (iw_handler) prism2_ioctl_giwfreq,              /* SIOCGIWFREQ */
+-      (iw_handler) prism2_ioctl_siwmode,              /* SIOCSIWMODE */
+-      (iw_handler) prism2_ioctl_giwmode,              /* SIOCGIWMODE */
+-      (iw_handler) prism2_ioctl_siwsens,              /* SIOCSIWSENS */
+-      (iw_handler) prism2_ioctl_giwsens,              /* SIOCGIWSENS */
+-      (iw_handler) NULL /* not used */,               /* SIOCSIWRANGE */
+-      (iw_handler) prism2_ioctl_giwrange,             /* SIOCGIWRANGE */
+-      (iw_handler) NULL /* not used */,               /* SIOCSIWPRIV */
+-      (iw_handler) NULL /* kernel code */,            /* SIOCGIWPRIV */
+-      (iw_handler) NULL /* not used */,               /* SIOCSIWSTATS */
+-      (iw_handler) NULL /* kernel code */,            /* SIOCGIWSTATS */
+-      iw_handler_set_spy,                             /* SIOCSIWSPY */
+-      iw_handler_get_spy,                             /* SIOCGIWSPY */
+-      iw_handler_set_thrspy,                          /* SIOCSIWTHRSPY */
+-      iw_handler_get_thrspy,                          /* SIOCGIWTHRSPY */
+-      (iw_handler) prism2_ioctl_siwap,                /* SIOCSIWAP */
+-      (iw_handler) prism2_ioctl_giwap,                /* SIOCGIWAP */
+-      (iw_handler) prism2_ioctl_siwmlme,              /* SIOCSIWMLME */
+-      (iw_handler) prism2_ioctl_giwaplist,            /* SIOCGIWAPLIST */
+-      (iw_handler) prism2_ioctl_siwscan,              /* SIOCSIWSCAN */
+-      (iw_handler) prism2_ioctl_giwscan,              /* SIOCGIWSCAN */
+-      (iw_handler) prism2_ioctl_siwessid,             /* SIOCSIWESSID */
+-      (iw_handler) prism2_ioctl_giwessid,             /* SIOCGIWESSID */
+-      (iw_handler) prism2_ioctl_siwnickn,             /* SIOCSIWNICKN */
+-      (iw_handler) prism2_ioctl_giwnickn,             /* SIOCGIWNICKN */
+-      (iw_handler) NULL,                              /* -- hole -- */
+-      (iw_handler) NULL,                              /* -- hole -- */
+-      (iw_handler) prism2_ioctl_siwrate,              /* SIOCSIWRATE */
+-      (iw_handler) prism2_ioctl_giwrate,              /* SIOCGIWRATE */
+-      (iw_handler) prism2_ioctl_siwrts,               /* SIOCSIWRTS */
+-      (iw_handler) prism2_ioctl_giwrts,               /* SIOCGIWRTS */
+-      (iw_handler) prism2_ioctl_siwfrag,              /* SIOCSIWFRAG */
+-      (iw_handler) prism2_ioctl_giwfrag,              /* SIOCGIWFRAG */
+-      (iw_handler) prism2_ioctl_siwtxpow,             /* SIOCSIWTXPOW */
+-      (iw_handler) prism2_ioctl_giwtxpow,             /* SIOCGIWTXPOW */
+-      (iw_handler) prism2_ioctl_siwretry,             /* SIOCSIWRETRY */
+-      (iw_handler) prism2_ioctl_giwretry,             /* SIOCGIWRETRY */
+-      (iw_handler) prism2_ioctl_siwencode,            /* SIOCSIWENCODE */
+-      (iw_handler) prism2_ioctl_giwencode,            /* SIOCGIWENCODE */
+-      (iw_handler) prism2_ioctl_siwpower,             /* SIOCSIWPOWER */
+-      (iw_handler) prism2_ioctl_giwpower,             /* SIOCGIWPOWER */
+-      (iw_handler) NULL,                              /* -- hole -- */
+-      (iw_handler) NULL,                              /* -- hole -- */
+-      (iw_handler) prism2_ioctl_siwgenie,             /* SIOCSIWGENIE */
+-      (iw_handler) prism2_ioctl_giwgenie,             /* SIOCGIWGENIE */
+-      (iw_handler) prism2_ioctl_siwauth,              /* SIOCSIWAUTH */
+-      (iw_handler) prism2_ioctl_giwauth,              /* SIOCGIWAUTH */
+-      (iw_handler) prism2_ioctl_siwencodeext,         /* SIOCSIWENCODEEXT */
+-      (iw_handler) prism2_ioctl_giwencodeext,         /* SIOCGIWENCODEEXT */
+-      (iw_handler) NULL,                              /* SIOCSIWPMKSA */
+-      (iw_handler) NULL,                              /* -- hole -- */
++      NULL,                           /* SIOCSIWCOMMIT */
++      prism2_get_name,                /* SIOCGIWNAME */
++      NULL,                           /* SIOCSIWNWID */
++      NULL,                           /* SIOCGIWNWID */
++      prism2_ioctl_siwfreq,           /* SIOCSIWFREQ */
++      prism2_ioctl_giwfreq,           /* SIOCGIWFREQ */
++      prism2_ioctl_siwmode,           /* SIOCSIWMODE */
++      prism2_ioctl_giwmode,           /* SIOCGIWMODE */
++      prism2_ioctl_siwsens,           /* SIOCSIWSENS */
++      prism2_ioctl_giwsens,           /* SIOCGIWSENS */
++      NULL /* not used */,            /* SIOCSIWRANGE */
++      prism2_ioctl_giwrange,          /* SIOCGIWRANGE */
++      NULL /* not used */,            /* SIOCSIWPRIV */
++      NULL /* kernel code */,         /* SIOCGIWPRIV */
++      NULL /* not used */,            /* SIOCSIWSTATS */
++      NULL /* kernel code */,         /* SIOCGIWSTATS */
++      iw_handler_set_spy,             /* SIOCSIWSPY */
++      iw_handler_get_spy,             /* SIOCGIWSPY */
++      iw_handler_set_thrspy,          /* SIOCSIWTHRSPY */
++      iw_handler_get_thrspy,          /* SIOCGIWTHRSPY */
++      prism2_ioctl_siwap,             /* SIOCSIWAP */
++      prism2_ioctl_giwap,             /* SIOCGIWAP */
++      prism2_ioctl_siwmlme,           /* SIOCSIWMLME */
++      prism2_ioctl_giwaplist,         /* SIOCGIWAPLIST */
++      prism2_ioctl_siwscan,           /* SIOCSIWSCAN */
++      prism2_ioctl_giwscan,           /* SIOCGIWSCAN */
++      prism2_ioctl_siwessid,          /* SIOCSIWESSID */
++      prism2_ioctl_giwessid,          /* SIOCGIWESSID */
++      prism2_ioctl_siwnickn,          /* SIOCSIWNICKN */
++      prism2_ioctl_giwnickn,          /* SIOCGIWNICKN */
++      NULL,                           /* -- hole -- */
++      NULL,                           /* -- hole -- */
++      prism2_ioctl_siwrate,           /* SIOCSIWRATE */
++      prism2_ioctl_giwrate,           /* SIOCGIWRATE */
++      prism2_ioctl_siwrts,            /* SIOCSIWRTS */
++      prism2_ioctl_giwrts,            /* SIOCGIWRTS */
++      prism2_ioctl_siwfrag,           /* SIOCSIWFRAG */
++      prism2_ioctl_giwfrag,           /* SIOCGIWFRAG */
++      prism2_ioctl_siwtxpow,          /* SIOCSIWTXPOW */
++      prism2_ioctl_giwtxpow,          /* SIOCGIWTXPOW */
++      prism2_ioctl_siwretry,          /* SIOCSIWRETRY */
++      prism2_ioctl_giwretry,          /* SIOCGIWRETRY */
++      prism2_ioctl_siwencode,         /* SIOCSIWENCODE */
++      prism2_ioctl_giwencode,         /* SIOCGIWENCODE */
++      prism2_ioctl_siwpower,          /* SIOCSIWPOWER */
++      prism2_ioctl_giwpower,          /* SIOCGIWPOWER */
++      NULL,                           /* -- hole -- */
++      NULL,                           /* -- hole -- */
++      prism2_ioctl_siwgenie,          /* SIOCSIWGENIE */
++      prism2_ioctl_giwgenie,          /* SIOCGIWGENIE */
++      prism2_ioctl_siwauth,           /* SIOCSIWAUTH */
++      prism2_ioctl_giwauth,           /* SIOCGIWAUTH */
++      prism2_ioctl_siwencodeext,      /* SIOCSIWENCODEEXT */
++      prism2_ioctl_giwencodeext,      /* SIOCGIWENCODEEXT */
++      NULL,                           /* SIOCSIWPMKSA */
++      NULL,                           /* -- hole -- */
+ };
+ static const iw_handler prism2_private_handler[] =
+ {                                                     /* SIOCIWFIRSTPRIV + */
+-      (iw_handler) prism2_ioctl_priv_prism2_param,    /* 0 */
+-      (iw_handler) prism2_ioctl_priv_get_prism2_param, /* 1 */
+-      (iw_handler) prism2_ioctl_priv_writemif,        /* 2 */
+-      (iw_handler) prism2_ioctl_priv_readmif,         /* 3 */
++      prism2_ioctl_priv_prism2_param, /* 0 */
++      prism2_ioctl_priv_get_prism2_param, /* 1 */
++      prism2_ioctl_priv_writemif,     /* 2 */
++      prism2_ioctl_priv_readmif,              /* 3 */
+ };
+ const struct iw_handler_def hostap_iw_handler_def =
+@@ -3954,8 +3992,8 @@ const struct iw_handler_def hostap_iw_handler_def =
+       .num_standard   = ARRAY_SIZE(prism2_handler),
+       .num_private    = ARRAY_SIZE(prism2_private_handler),
+       .num_private_args = ARRAY_SIZE(prism2_priv),
+-      .standard       = (iw_handler *) prism2_handler,
+-      .private        = (iw_handler *) prism2_private_handler,
++      .standard       = prism2_handler,
++      .private        = prism2_private_handler,
+       .private_args   = (struct iw_priv_args *) prism2_priv,
+       .get_wireless_stats = hostap_get_wireless_stats,
+ };
+diff --git a/drivers/net/wireless/intersil/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c
+index 1d4dae4..0508fc1 100644
+--- a/drivers/net/wireless/intersil/orinoco/wext.c
++++ b/drivers/net/wireless/intersil/orinoco/wext.c
+@@ -154,9 +154,10 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
+ static int orinoco_ioctl_setwap(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct sockaddr *ap_addr,
++                              union iwreq_data *wrqu,
+                               char *extra)
+ {
++      struct sockaddr *ap_addr = &wrqu->ap_addr;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int err = -EINPROGRESS;         /* Call commit handler */
+       unsigned long flags;
+@@ -213,9 +214,10 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
+ static int orinoco_ioctl_getwap(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct sockaddr *ap_addr,
++                              union iwreq_data *wrqu,
+                               char *extra)
+ {
++      struct sockaddr *ap_addr = &wrqu->ap_addr;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int err = 0;
+@@ -234,9 +236,10 @@ static int orinoco_ioctl_getwap(struct net_device *dev,
+ static int orinoco_ioctl_setiwencode(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   struct iw_point *erq,
++                                   union iwreq_data *wrqu,
+                                    char *keybuf)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+       int setindex = priv->tx_key;
+@@ -325,9 +328,10 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
+ static int orinoco_ioctl_getiwencode(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   struct iw_point *erq,
++                                   union iwreq_data *wrqu,
+                                    char *keybuf)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+       unsigned long flags;
+@@ -361,9 +365,10 @@ static int orinoco_ioctl_getiwencode(struct net_device *dev,
+ static int orinoco_ioctl_setessid(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *erq,
++                                union iwreq_data *wrqu,
+                                 char *essidbuf)
+ {
++      struct iw_point *erq = &wrqu->essid;
+       struct orinoco_private *priv = ndev_priv(dev);
+       unsigned long flags;
+@@ -392,9 +397,10 @@ static int orinoco_ioctl_setessid(struct net_device *dev,
+ static int orinoco_ioctl_getessid(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *erq,
++                                union iwreq_data *wrqu,
+                                 char *essidbuf)
+ {
++      struct iw_point *erq = &wrqu->essid;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int active;
+       int err = 0;
+@@ -420,9 +426,10 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
+ static int orinoco_ioctl_setfreq(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_freq *frq,
++                               union iwreq_data *wrqu,
+                                char *extra)
+ {
++      struct iw_freq *frq = &wrqu->freq;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int chan = -1;
+       unsigned long flags;
+@@ -469,9 +476,10 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
+ static int orinoco_ioctl_getfreq(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_freq *frq,
++                               union iwreq_data *wrqu,
+                                char *extra)
+ {
++      struct iw_freq *frq = &wrqu->freq;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int tmp;
+@@ -488,9 +496,10 @@ static int orinoco_ioctl_getfreq(struct net_device *dev,
+ static int orinoco_ioctl_getsens(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *srq,
++                               union iwreq_data *wrqu,
+                                char *extra)
+ {
++      struct iw_param *srq = &wrqu->sens;
+       struct orinoco_private *priv = ndev_priv(dev);
+       struct hermes *hw = &priv->hw;
+       u16 val;
+@@ -517,9 +526,10 @@ static int orinoco_ioctl_getsens(struct net_device *dev,
+ static int orinoco_ioctl_setsens(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *srq,
++                               union iwreq_data *wrqu,
+                                char *extra)
+ {
++      struct iw_param *srq = &wrqu->sens;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int val = srq->value;
+       unsigned long flags;
+@@ -540,9 +550,10 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
+ static int orinoco_ioctl_setrate(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq,
++                               union iwreq_data *wrqu,
+                                char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int ratemode;
+       int bitrate; /* 100s of kilobits */
+@@ -574,9 +585,10 @@ static int orinoco_ioctl_setrate(struct net_device *dev,
+ static int orinoco_ioctl_getrate(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *rrq,
++                               union iwreq_data *wrqu,
+                                char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int err = 0;
+       int bitrate, automatic;
+@@ -610,9 +622,10 @@ static int orinoco_ioctl_getrate(struct net_device *dev,
+ static int orinoco_ioctl_setpower(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_param *prq,
++                                union iwreq_data *wrqu,
+                                 char *extra)
+ {
++      struct iw_param *prq = &wrqu->power;
+       struct orinoco_private *priv = ndev_priv(dev);
+       int err = -EINPROGRESS;         /* Call commit handler */
+       unsigned long flags;
+@@ -664,9 +677,10 @@ static int orinoco_ioctl_setpower(struct net_device *dev,
+ static int orinoco_ioctl_getpower(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_param *prq,
++                                union iwreq_data *wrqu,
+                                 char *extra)
+ {
++      struct iw_param *prq = &wrqu->power;
+       struct orinoco_private *priv = ndev_priv(dev);
+       struct hermes *hw = &priv->hw;
+       int err = 0;
+@@ -1097,7 +1111,7 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
+ static int orinoco_ioctl_reset(struct net_device *dev,
+                              struct iw_request_info *info,
+-                             void *wrqu,
++                             union iwreq_data *wrqu,
+                              char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1121,7 +1135,7 @@ static int orinoco_ioctl_reset(struct net_device *dev,
+ static int orinoco_ioctl_setibssport(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   void *wrqu,
++                                   union iwreq_data *wrqu,
+                                    char *extra)
+ {
+@@ -1143,7 +1157,7 @@ static int orinoco_ioctl_setibssport(struct net_device *dev,
+ static int orinoco_ioctl_getibssport(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   void *wrqu,
++                                   union iwreq_data *wrqu,
+                                    char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1155,7 +1169,7 @@ static int orinoco_ioctl_getibssport(struct net_device *dev,
+ static int orinoco_ioctl_setport3(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                void *wrqu,
++                                union iwreq_data *wrqu,
+                                 char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1201,7 +1215,7 @@ static int orinoco_ioctl_setport3(struct net_device *dev,
+ static int orinoco_ioctl_getport3(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                void *wrqu,
++                                union iwreq_data *wrqu,
+                                 char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1213,7 +1227,7 @@ static int orinoco_ioctl_getport3(struct net_device *dev,
+ static int orinoco_ioctl_setpreamble(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   void *wrqu,
++                                   union iwreq_data *wrqu,
+                                    char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1245,7 +1259,7 @@ static int orinoco_ioctl_setpreamble(struct net_device *dev,
+ static int orinoco_ioctl_getpreamble(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   void *wrqu,
++                                   union iwreq_data *wrqu,
+                                    char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1265,9 +1279,10 @@ static int orinoco_ioctl_getpreamble(struct net_device *dev,
+  * For Wireless Tools 25 and 26 append "dummy" are the end. */
+ static int orinoco_ioctl_getrid(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              struct iw_point *data,
++                              union iwreq_data *wrqu,
+                               char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       struct orinoco_private *priv = ndev_priv(dev);
+       struct hermes *hw = &priv->hw;
+       int rid = data->flags;
+@@ -1303,7 +1318,7 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
+ /* Commit handler, called after set operations */
+ static int orinoco_ioctl_commit(struct net_device *dev,
+                               struct iw_request_info *info,
+-                              void *wrqu,
++                              union iwreq_data *wrqu,
+                               char *extra)
+ {
+       struct orinoco_private *priv = ndev_priv(dev);
+@@ -1347,36 +1362,36 @@ static const struct iw_priv_args orinoco_privtab[] = {
+  */
+ static const iw_handler       orinoco_handler[] = {
+-      IW_HANDLER(SIOCSIWCOMMIT,       (iw_handler)orinoco_ioctl_commit),
+-      IW_HANDLER(SIOCGIWNAME,         (iw_handler)cfg80211_wext_giwname),
+-      IW_HANDLER(SIOCSIWFREQ,         (iw_handler)orinoco_ioctl_setfreq),
+-      IW_HANDLER(SIOCGIWFREQ,         (iw_handler)orinoco_ioctl_getfreq),
+-      IW_HANDLER(SIOCSIWMODE,         (iw_handler)cfg80211_wext_siwmode),
+-      IW_HANDLER(SIOCGIWMODE,         (iw_handler)cfg80211_wext_giwmode),
+-      IW_HANDLER(SIOCSIWSENS,         (iw_handler)orinoco_ioctl_setsens),
+-      IW_HANDLER(SIOCGIWSENS,         (iw_handler)orinoco_ioctl_getsens),
+-      IW_HANDLER(SIOCGIWRANGE,        (iw_handler)cfg80211_wext_giwrange),
++      IW_HANDLER(SIOCSIWCOMMIT,       orinoco_ioctl_commit),
++      IW_HANDLER(SIOCGIWNAME,         cfg80211_wext_giwname),
++      IW_HANDLER(SIOCSIWFREQ,         orinoco_ioctl_setfreq),
++      IW_HANDLER(SIOCGIWFREQ,         orinoco_ioctl_getfreq),
++      IW_HANDLER(SIOCSIWMODE,         cfg80211_wext_siwmode),
++      IW_HANDLER(SIOCGIWMODE,         cfg80211_wext_giwmode),
++      IW_HANDLER(SIOCSIWSENS,         orinoco_ioctl_setsens),
++      IW_HANDLER(SIOCGIWSENS,         orinoco_ioctl_getsens),
++      IW_HANDLER(SIOCGIWRANGE,        cfg80211_wext_giwrange),
+       IW_HANDLER(SIOCSIWSPY,          iw_handler_set_spy),
+       IW_HANDLER(SIOCGIWSPY,          iw_handler_get_spy),
+       IW_HANDLER(SIOCSIWTHRSPY,       iw_handler_set_thrspy),
+       IW_HANDLER(SIOCGIWTHRSPY,       iw_handler_get_thrspy),
+-      IW_HANDLER(SIOCSIWAP,           (iw_handler)orinoco_ioctl_setwap),
+-      IW_HANDLER(SIOCGIWAP,           (iw_handler)orinoco_ioctl_getwap),
+-      IW_HANDLER(SIOCSIWSCAN,         (iw_handler)cfg80211_wext_siwscan),
+-      IW_HANDLER(SIOCGIWSCAN,         (iw_handler)cfg80211_wext_giwscan),
+-      IW_HANDLER(SIOCSIWESSID,        (iw_handler)orinoco_ioctl_setessid),
+-      IW_HANDLER(SIOCGIWESSID,        (iw_handler)orinoco_ioctl_getessid),
+-      IW_HANDLER(SIOCSIWRATE,         (iw_handler)orinoco_ioctl_setrate),
+-      IW_HANDLER(SIOCGIWRATE,         (iw_handler)orinoco_ioctl_getrate),
+-      IW_HANDLER(SIOCSIWRTS,          (iw_handler)cfg80211_wext_siwrts),
+-      IW_HANDLER(SIOCGIWRTS,          (iw_handler)cfg80211_wext_giwrts),
+-      IW_HANDLER(SIOCSIWFRAG,         (iw_handler)cfg80211_wext_siwfrag),
+-      IW_HANDLER(SIOCGIWFRAG,         (iw_handler)cfg80211_wext_giwfrag),
+-      IW_HANDLER(SIOCGIWRETRY,        (iw_handler)cfg80211_wext_giwretry),
+-      IW_HANDLER(SIOCSIWENCODE,       (iw_handler)orinoco_ioctl_setiwencode),
+-      IW_HANDLER(SIOCGIWENCODE,       (iw_handler)orinoco_ioctl_getiwencode),
+-      IW_HANDLER(SIOCSIWPOWER,        (iw_handler)orinoco_ioctl_setpower),
+-      IW_HANDLER(SIOCGIWPOWER,        (iw_handler)orinoco_ioctl_getpower),
++      IW_HANDLER(SIOCSIWAP,           orinoco_ioctl_setwap),
++      IW_HANDLER(SIOCGIWAP,           orinoco_ioctl_getwap),
++      IW_HANDLER(SIOCSIWSCAN,         cfg80211_wext_siwscan),
++      IW_HANDLER(SIOCGIWSCAN,         cfg80211_wext_giwscan),
++      IW_HANDLER(SIOCSIWESSID,        orinoco_ioctl_setessid),
++      IW_HANDLER(SIOCGIWESSID,        orinoco_ioctl_getessid),
++      IW_HANDLER(SIOCSIWRATE,         orinoco_ioctl_setrate),
++      IW_HANDLER(SIOCGIWRATE,         orinoco_ioctl_getrate),
++      IW_HANDLER(SIOCSIWRTS,          cfg80211_wext_siwrts),
++      IW_HANDLER(SIOCGIWRTS,          cfg80211_wext_giwrts),
++      IW_HANDLER(SIOCSIWFRAG,         cfg80211_wext_siwfrag),
++      IW_HANDLER(SIOCGIWFRAG,         cfg80211_wext_giwfrag),
++      IW_HANDLER(SIOCGIWRETRY,        cfg80211_wext_giwretry),
++      IW_HANDLER(SIOCSIWENCODE,       orinoco_ioctl_setiwencode),
++      IW_HANDLER(SIOCGIWENCODE,       orinoco_ioctl_getiwencode),
++      IW_HANDLER(SIOCSIWPOWER,        orinoco_ioctl_setpower),
++      IW_HANDLER(SIOCGIWPOWER,        orinoco_ioctl_getpower),
+       IW_HANDLER(SIOCSIWGENIE,        orinoco_ioctl_set_genie),
+       IW_HANDLER(SIOCGIWGENIE,        orinoco_ioctl_get_genie),
+       IW_HANDLER(SIOCSIWMLME,         orinoco_ioctl_set_mlme),
+@@ -1391,15 +1406,15 @@ static const iw_handler        orinoco_handler[] = {
+   Added typecasting since we no longer use iwreq_data -- Moustafa
+  */
+ static const iw_handler       orinoco_private_handler[] = {
+-      [0] = (iw_handler)orinoco_ioctl_reset,
+-      [1] = (iw_handler)orinoco_ioctl_reset,
+-      [2] = (iw_handler)orinoco_ioctl_setport3,
+-      [3] = (iw_handler)orinoco_ioctl_getport3,
+-      [4] = (iw_handler)orinoco_ioctl_setpreamble,
+-      [5] = (iw_handler)orinoco_ioctl_getpreamble,
+-      [6] = (iw_handler)orinoco_ioctl_setibssport,
+-      [7] = (iw_handler)orinoco_ioctl_getibssport,
+-      [9] = (iw_handler)orinoco_ioctl_getrid,
++      [0] = orinoco_ioctl_reset,
++      [1] = orinoco_ioctl_reset,
++      [2] = orinoco_ioctl_setport3,
++      [3] = orinoco_ioctl_getport3,
++      [4] = orinoco_ioctl_setpreamble,
++      [5] = orinoco_ioctl_getpreamble,
++      [6] = orinoco_ioctl_setibssport,
++      [7] = orinoco_ioctl_getibssport,
++      [9] = orinoco_ioctl_getrid,
+ };
+ const struct iw_handler_def orinoco_handler_def = {
+diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+index 48e8a97..3499ec8 100644
+--- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c
++++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+@@ -45,7 +45,7 @@ static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
+                               u8 *wpa_ie, size_t wpa_ie_len);
+ static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
+ static int prism54_set_wpa(struct net_device *, struct iw_request_info *,
+-                              __u32 *, char *);
++                              union iwreq_data *, char *);
+ /* In 500 kbps */
+ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22,
+@@ -240,7 +240,7 @@ prism54_get_wireless_stats(struct net_device *ndev)
+ static int
+ prism54_commit(struct net_device *ndev, struct iw_request_info *info,
+-             char *cwrq, char *extra)
++             union iwreq_data *cwrq, char *extra)
+ {
+       islpci_private *priv = netdev_priv(ndev);
+@@ -256,8 +256,9 @@ prism54_commit(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_name(struct net_device *ndev, struct iw_request_info *info,
+-               char *cwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      char *cwrq = wrqu->name;
+       islpci_private *priv = netdev_priv(ndev);
+       char *capabilities;
+       union oid_res_t r;
+@@ -287,8 +288,9 @@ prism54_get_name(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_freq(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_freq *fwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_freq *fwrq = &wrqu->freq;
+       islpci_private *priv = netdev_priv(ndev);
+       int rvalue;
+       u32 c;
+@@ -307,8 +309,9 @@ prism54_set_freq(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_freq(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_freq *fwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_freq *fwrq = &wrqu->freq;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue;
+@@ -324,8 +327,9 @@ prism54_get_freq(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_mode(struct net_device *ndev, struct iw_request_info *info,
+-               __u32 * uwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      __u32 *uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       u32 mlmeautolevel = CARD_DEFAULT_MLME_MODE;
+@@ -368,8 +372,9 @@ prism54_set_mode(struct net_device *ndev, struct iw_request_info *info,
+ /* Use mib cache */
+ static int
+ prism54_get_mode(struct net_device *ndev, struct iw_request_info *info,
+-               __u32 * uwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      __u32 *uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       BUG_ON((priv->iw_mode < IW_MODE_AUTO) || (priv->iw_mode >
+@@ -386,8 +391,9 @@ prism54_get_mode(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_sens(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->sens;
+       islpci_private *priv = netdev_priv(ndev);
+       u32 sens;
+@@ -399,8 +405,9 @@ prism54_set_sens(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_sens(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->sens;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue;
+@@ -416,8 +423,9 @@ prism54_get_sens(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
+-                struct iw_point *dwrq, char *extra)
++                union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       struct iw_range *range = (struct iw_range *) extra;
+       islpci_private *priv = netdev_priv(ndev);
+       u8 *data;
+@@ -521,8 +529,9 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
+-              struct sockaddr *awrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct sockaddr *awrq = &wrqu->ap_addr;
+       islpci_private *priv = netdev_priv(ndev);
+       char bssid[6];
+       int rvalue;
+@@ -543,8 +552,9 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
+-              struct sockaddr *awrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct sockaddr *awrq = &wrqu->ap_addr;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue;
+@@ -559,7 +569,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_scan(struct net_device *dev, struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *vwrq, char *extra)
+ {
+       /* hehe the device does this automagicaly */
+       return 0;
+@@ -679,8 +689,9 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_point *dwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       int i, rvalue;
+       struct obj_bsslist *bsslist;
+@@ -733,8 +744,9 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_essid(struct net_device *ndev, struct iw_request_info *info,
+-                struct iw_point *dwrq, char *extra)
++                union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       struct obj_ssid essid;
+@@ -760,8 +772,9 @@ prism54_set_essid(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
+-                struct iw_point *dwrq, char *extra)
++                union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       struct obj_ssid *essid;
+       union oid_res_t r;
+@@ -790,8 +803,9 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
+  */
+ static int
+ prism54_set_nick(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_point *dwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       if (dwrq->length > IW_ESSID_MAX_SIZE)
+@@ -807,8 +821,9 @@ prism54_set_nick(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_nick(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_point *dwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       dwrq->length = 0;
+@@ -826,9 +841,9 @@ prism54_get_nick(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_rate(struct net_device *ndev,
+                struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
+-
++      struct iw_param *vwrq = &wrqu->bitrate;
+       islpci_private *priv = netdev_priv(ndev);
+       u32 rate, profile;
+       char *data;
+@@ -899,8 +914,9 @@ prism54_set_rate(struct net_device *ndev,
+ static int
+ prism54_get_rate(struct net_device *ndev,
+                struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->bitrate;
+       islpci_private *priv = netdev_priv(ndev);
+       int rvalue;
+       char *data;
+@@ -926,8 +942,9 @@ prism54_get_rate(struct net_device *ndev,
+ static int
+ prism54_set_rts(struct net_device *ndev, struct iw_request_info *info,
+-              struct iw_param *vwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->rts;
+       islpci_private *priv = netdev_priv(ndev);
+       return mgt_set_request(priv, DOT11_OID_RTSTHRESH, 0, &vwrq->value);
+@@ -935,8 +952,9 @@ prism54_set_rts(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_rts(struct net_device *ndev, struct iw_request_info *info,
+-              struct iw_param *vwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->rts;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue;
+@@ -950,8 +968,9 @@ prism54_get_rts(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_frag(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->frag;
+       islpci_private *priv = netdev_priv(ndev);
+       return mgt_set_request(priv, DOT11_OID_FRAGTHRESH, 0, &vwrq->value);
+@@ -959,8 +978,9 @@ prism54_set_frag(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_frag(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_param *vwrq, char *extra)
++               union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->frag;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue;
+@@ -980,8 +1000,9 @@ prism54_get_frag(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_retry(struct net_device *ndev, struct iw_request_info *info,
+-                struct iw_param *vwrq, char *extra)
++                union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->retry;
+       islpci_private *priv = netdev_priv(ndev);
+       u32 slimit = 0, llimit = 0;     /* short and long limit */
+       u32 lifetime = 0;
+@@ -1022,8 +1043,9 @@ prism54_set_retry(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_retry(struct net_device *ndev, struct iw_request_info *info,
+-                struct iw_param *vwrq, char *extra)
++                union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->retry;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue = 0;
+@@ -1054,8 +1076,9 @@ prism54_get_retry(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
+-                 struct iw_point *dwrq, char *extra)
++                 union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       int rvalue = 0, force = 0;
+       int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
+@@ -1155,8 +1178,9 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_encode(struct net_device *ndev, struct iw_request_info *info,
+-                 struct iw_point *dwrq, char *extra)
++                 union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       struct obj_key *key;
+       u32 devindex, index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+@@ -1203,8 +1227,9 @@ prism54_get_encode(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info,
+-                  struct iw_param *vwrq, char *extra)
++                  union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->txpower;
+       islpci_private *priv = netdev_priv(ndev);
+       union oid_res_t r;
+       int rvalue;
+@@ -1223,8 +1248,9 @@ prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
+-                  struct iw_param *vwrq, char *extra)
++                  union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->txpower;
+       islpci_private *priv = netdev_priv(ndev);
+       s32 u = vwrq->value;
+@@ -1249,8 +1275,9 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
+ static int prism54_set_genie(struct net_device *ndev,
+                            struct iw_request_info *info,
+-                           struct iw_point *data, char *extra)
++                           union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       int alen, ret = 0;
+       struct obj_attachment *attach;
+@@ -1298,8 +1325,9 @@ static int prism54_set_genie(struct net_device *ndev,
+ static int prism54_get_genie(struct net_device *ndev,
+                            struct iw_request_info *info,
+-                           struct iw_point *data, char *extra)
++                           union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       int len = priv->wpa_ie_len;
+@@ -1739,7 +1767,7 @@ out:
+ static int
+ prism54_reset(struct net_device *ndev, struct iw_request_info *info,
+-            __u32 * uwrq, char *extra)
++            union iwreq_data * uwrq, char *extra)
+ {
+       islpci_reset(netdev_priv(ndev), 0);
+@@ -1748,8 +1776,9 @@ prism54_reset(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_oid(struct net_device *ndev, struct iw_request_info *info,
+-              struct iw_point *dwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       union oid_res_t r;
+       int rvalue;
+       enum oid_num_t n = dwrq->flags;
+@@ -1763,8 +1792,9 @@ prism54_get_oid(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_u32(struct net_device *ndev, struct iw_request_info *info,
+-              __u32 * uwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       u32 oid = uwrq[0], u = uwrq[1];
+       return mgt_set_request(netdev_priv(ndev), oid, 0, &u);
+@@ -1772,8 +1802,9 @@ prism54_set_u32(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_raw(struct net_device *ndev, struct iw_request_info *info,
+-              struct iw_point *dwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       u32 oid = dwrq->flags;
+       return mgt_set_request(netdev_priv(ndev), oid, 0, extra);
+@@ -1819,7 +1850,7 @@ prism54_acl_clean(struct islpci_acl *acl)
+ static int
+ prism54_add_mac(struct net_device *ndev, struct iw_request_info *info,
+-              struct sockaddr *awrq, char *extra)
++              union iwreq_data *awrq, char *extra)
+ {
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_acl *acl = &priv->acl;
+@@ -1848,7 +1879,7 @@ prism54_add_mac(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
+-              struct sockaddr *awrq, char *extra)
++              union iwreq_data *awrq, char *extra)
+ {
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_acl *acl = &priv->acl;
+@@ -1875,8 +1906,9 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
+-              struct iw_point *dwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *dwrq = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_acl *acl = &priv->acl;
+       struct mac_entry *entry;
+@@ -1903,8 +1935,9 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_policy(struct net_device *ndev, struct iw_request_info *info,
+-                 __u32 * uwrq, char *extra)
++                 union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_acl *acl = &priv->acl;
+       u32 mlmeautolevel;
+@@ -1939,8 +1972,9 @@ prism54_set_policy(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_policy(struct net_device *ndev, struct iw_request_info *info,
+-                 __u32 * uwrq, char *extra)
++                 union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_acl *acl = &priv->acl;
+@@ -1979,7 +2013,7 @@ prism54_mac_accept(struct islpci_acl *acl, char *mac)
+ static int
+ prism54_kick_all(struct net_device *ndev, struct iw_request_info *info,
+-               struct iw_point *dwrq, char *extra)
++               union iwreq_data *dwrq, char *extra)
+ {
+       struct obj_mlme *mlme;
+       int rvalue;
+@@ -1999,7 +2033,7 @@ prism54_kick_all(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info,
+-               struct sockaddr *awrq, char *extra)
++               union iwreq_data *awrq, char *extra)
+ {
+       struct obj_mlme *mlme;
+       struct sockaddr *addr = (struct sockaddr *) extra;
+@@ -2085,8 +2119,7 @@ link_changed(struct net_device *ndev, u32 bitrate)
+               netif_carrier_on(ndev);
+               if (priv->iw_mode == IW_MODE_INFRA) {
+                       union iwreq_data uwrq;
+-                      prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq,
+-                                      NULL);
++                      prism54_get_wap(ndev, NULL, &uwrq, NULL);
+                       wireless_send_event(ndev, SIOCGIWAP, &uwrq, NULL);
+               } else
+                       send_simple_event(netdev_priv(ndev),
+@@ -2498,8 +2531,9 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
+ static int
+ prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
+-              __u32 * uwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       u32 mlme, authen, dot1x, filter, wep;
+@@ -2542,8 +2576,9 @@ prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info,
+-              __u32 * uwrq, char *extra)
++              union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       *uwrq = priv->wpa;
+       return 0;
+@@ -2551,8 +2586,9 @@ prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info,
+-                   __u32 * uwrq, char *extra)
++                  union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       priv->monitor_type =
+           (*uwrq ? ARPHRD_IEEE80211_PRISM : ARPHRD_IEEE80211);
+@@ -2564,8 +2600,9 @@ prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info,
+-                   __u32 * uwrq, char *extra)
++                  union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       *uwrq = (priv->monitor_type == ARPHRD_IEEE80211_PRISM);
+       return 0;
+@@ -2573,8 +2610,9 @@ prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info,
+-                __u32 * uwrq, char *extra)
++                union iwreq_data *wrqu, char *extra)
+ {
++      __u32 * uwrq = &wrqu->mode;
+       islpci_private *priv = netdev_priv(ndev);
+       priv->priv_oid = *uwrq;
+@@ -2585,8 +2623,9 @@ prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info,
+-                    struct iw_point *data, char *extra)
++                    union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_mgmtframe *response;
+       int ret = -EIO;
+@@ -2621,8 +2660,9 @@ prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info,
+ static int
+ prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info,
+-                    struct iw_point *data, char *extra)
++                    union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *data = &wrqu->data;
+       islpci_private *priv = netdev_priv(ndev);
+       struct islpci_mgmtframe *response;
+       int ret = 0, response_op = PIMFOR_OP_ERROR;
+@@ -2682,60 +2722,60 @@ prism54_set_spy(struct net_device *ndev,
+ }
+ static const iw_handler prism54_handler[] = {
+-      (iw_handler) prism54_commit,    /* SIOCSIWCOMMIT */
+-      (iw_handler) prism54_get_name,  /* SIOCGIWNAME */
+-      (iw_handler) NULL,      /* SIOCSIWNWID */
+-      (iw_handler) NULL,      /* SIOCGIWNWID */
+-      (iw_handler) prism54_set_freq,  /* SIOCSIWFREQ */
+-      (iw_handler) prism54_get_freq,  /* SIOCGIWFREQ */
+-      (iw_handler) prism54_set_mode,  /* SIOCSIWMODE */
+-      (iw_handler) prism54_get_mode,  /* SIOCGIWMODE */
+-      (iw_handler) prism54_set_sens,  /* SIOCSIWSENS */
+-      (iw_handler) prism54_get_sens,  /* SIOCGIWSENS */
+-      (iw_handler) NULL,      /* SIOCSIWRANGE */
+-      (iw_handler) prism54_get_range, /* SIOCGIWRANGE */
+-      (iw_handler) NULL,      /* SIOCSIWPRIV */
+-      (iw_handler) NULL,      /* SIOCGIWPRIV */
+-      (iw_handler) NULL,      /* SIOCSIWSTATS */
+-      (iw_handler) NULL,      /* SIOCGIWSTATS */
++      prism54_commit, /* SIOCSIWCOMMIT */
++      prism54_get_name,       /* SIOCGIWNAME */
++      NULL,   /* SIOCSIWNWID */
++      NULL,   /* SIOCGIWNWID */
++      prism54_set_freq,       /* SIOCSIWFREQ */
++      prism54_get_freq,       /* SIOCGIWFREQ */
++      prism54_set_mode,       /* SIOCSIWMODE */
++      prism54_get_mode,       /* SIOCGIWMODE */
++      prism54_set_sens,       /* SIOCSIWSENS */
++      prism54_get_sens,       /* SIOCGIWSENS */
++      NULL,   /* SIOCSIWRANGE */
++      prism54_get_range,      /* SIOCGIWRANGE */
++      NULL,   /* SIOCSIWPRIV */
++      NULL,   /* SIOCGIWPRIV */
++      NULL,   /* SIOCSIWSTATS */
++      NULL,   /* SIOCGIWSTATS */
+       prism54_set_spy,        /* SIOCSIWSPY */
+       iw_handler_get_spy,     /* SIOCGIWSPY */
+       iw_handler_set_thrspy,  /* SIOCSIWTHRSPY */
+       iw_handler_get_thrspy,  /* SIOCGIWTHRSPY */
+-      (iw_handler) prism54_set_wap,   /* SIOCSIWAP */
+-      (iw_handler) prism54_get_wap,   /* SIOCGIWAP */
+-      (iw_handler) NULL,      /* -- hole -- */
+-      (iw_handler) NULL,      /* SIOCGIWAPLIST deprecated */
+-      (iw_handler) prism54_set_scan,  /* SIOCSIWSCAN */
+-      (iw_handler) prism54_get_scan,  /* SIOCGIWSCAN */
+-      (iw_handler) prism54_set_essid, /* SIOCSIWESSID */
+-      (iw_handler) prism54_get_essid, /* SIOCGIWESSID */
+-      (iw_handler) prism54_set_nick,  /* SIOCSIWNICKN */
+-      (iw_handler) prism54_get_nick,  /* SIOCGIWNICKN */
+-      (iw_handler) NULL,      /* -- hole -- */
+-      (iw_handler) NULL,      /* -- hole -- */
+-      (iw_handler) prism54_set_rate,  /* SIOCSIWRATE */
+-      (iw_handler) prism54_get_rate,  /* SIOCGIWRATE */
+-      (iw_handler) prism54_set_rts,   /* SIOCSIWRTS */
+-      (iw_handler) prism54_get_rts,   /* SIOCGIWRTS */
+-      (iw_handler) prism54_set_frag,  /* SIOCSIWFRAG */
+-      (iw_handler) prism54_get_frag,  /* SIOCGIWFRAG */
+-      (iw_handler) prism54_set_txpower,       /* SIOCSIWTXPOW */
+-      (iw_handler) prism54_get_txpower,       /* SIOCGIWTXPOW */
+-      (iw_handler) prism54_set_retry, /* SIOCSIWRETRY */
+-      (iw_handler) prism54_get_retry, /* SIOCGIWRETRY */
+-      (iw_handler) prism54_set_encode,        /* SIOCSIWENCODE */
+-      (iw_handler) prism54_get_encode,        /* SIOCGIWENCODE */
+-      (iw_handler) NULL,      /* SIOCSIWPOWER */
+-      (iw_handler) NULL,      /* SIOCGIWPOWER */
++      prism54_set_wap,        /* SIOCSIWAP */
++      prism54_get_wap,        /* SIOCGIWAP */
++      NULL,   /* -- hole -- */
++      NULL,   /* SIOCGIWAPLIST deprecated */
++      prism54_set_scan,       /* SIOCSIWSCAN */
++      prism54_get_scan,       /* SIOCGIWSCAN */
++      prism54_set_essid,      /* SIOCSIWESSID */
++      prism54_get_essid,      /* SIOCGIWESSID */
++      prism54_set_nick,       /* SIOCSIWNICKN */
++      prism54_get_nick,       /* SIOCGIWNICKN */
++      NULL,   /* -- hole -- */
++      NULL,   /* -- hole -- */
++      prism54_set_rate,       /* SIOCSIWRATE */
++      prism54_get_rate,       /* SIOCGIWRATE */
++      prism54_set_rts,        /* SIOCSIWRTS */
++      prism54_get_rts,        /* SIOCGIWRTS */
++      prism54_set_frag,       /* SIOCSIWFRAG */
++      prism54_get_frag,       /* SIOCGIWFRAG */
++      prism54_set_txpower,    /* SIOCSIWTXPOW */
++      prism54_get_txpower,    /* SIOCGIWTXPOW */
++      prism54_set_retry,      /* SIOCSIWRETRY */
++      prism54_get_retry,      /* SIOCGIWRETRY */
++      prism54_set_encode,     /* SIOCSIWENCODE */
++      prism54_get_encode,     /* SIOCGIWENCODE */
++      NULL,   /* SIOCSIWPOWER */
++      NULL,   /* SIOCGIWPOWER */
+       NULL,                   /* -- hole -- */
+       NULL,                   /* -- hole -- */
+-      (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */
+-      (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */
+-      (iw_handler) prism54_set_auth,  /* SIOCSIWAUTH */
+-      (iw_handler) prism54_get_auth,  /* SIOCGIWAUTH */
+-      (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */
+-      (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */
++      prism54_set_genie,      /* SIOCSIWGENIE */
++      prism54_get_genie,      /* SIOCGIWGENIE */
++      prism54_set_auth,       /* SIOCSIWAUTH */
++      prism54_get_auth,       /* SIOCGIWAUTH */
++      prism54_set_encodeext, /* SIOCSIWENCODEEXT */
++      prism54_get_encodeext, /* SIOCGIWENCODEEXT */
+       NULL,                   /* SIOCSIWPMKSA */
+ };
+@@ -2872,31 +2912,31 @@ static const struct iw_priv_args prism54_private_args[] = {
+ };
+ static const iw_handler prism54_private_handler[] = {
+-      (iw_handler) prism54_reset,
+-      (iw_handler) prism54_get_policy,
+-      (iw_handler) prism54_set_policy,
+-      (iw_handler) prism54_get_mac,
+-      (iw_handler) prism54_add_mac,
+-      (iw_handler) NULL,
+-      (iw_handler) prism54_del_mac,
+-      (iw_handler) NULL,
+-      (iw_handler) prism54_kick_mac,
+-      (iw_handler) NULL,
+-      (iw_handler) prism54_kick_all,
+-      (iw_handler) prism54_get_wpa,
+-      (iw_handler) prism54_set_wpa,
+-      (iw_handler) NULL,
+-      (iw_handler) prism54_debug_oid,
+-      (iw_handler) prism54_debug_get_oid,
+-      (iw_handler) prism54_debug_set_oid,
+-      (iw_handler) prism54_get_oid,
+-      (iw_handler) prism54_set_u32,
+-      (iw_handler) NULL,
+-      (iw_handler) prism54_set_raw,
+-      (iw_handler) NULL,
+-      (iw_handler) prism54_set_raw,
+-      (iw_handler) prism54_get_prismhdr,
+-      (iw_handler) prism54_set_prismhdr,
++      prism54_reset,
++      prism54_get_policy,
++      prism54_set_policy,
++      prism54_get_mac,
++      prism54_add_mac,
++      NULL,
++      prism54_del_mac,
++      NULL,
++      prism54_kick_mac,
++      NULL,
++      prism54_kick_all,
++      prism54_get_wpa,
++      prism54_set_wpa,
++      NULL,
++      prism54_debug_oid,
++      prism54_debug_get_oid,
++      prism54_debug_set_oid,
++      prism54_get_oid,
++      prism54_set_u32,
++      NULL,
++      prism54_set_raw,
++      NULL,
++      prism54_set_raw,
++      prism54_get_prismhdr,
++      prism54_set_prismhdr,
+ };
+ const struct iw_handler_def prism54_handler_def = {
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 8c35ac8..42033c1 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3360,20 +3360,20 @@ static int __init init_mac80211_hwsim(void)
+       if (channels < 1)
+               return -EINVAL;
+-      mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
+-      mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
+-      mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
+-      mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
+-      mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
+-      mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
+-      mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
+-      mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
+-      mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
+-      mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
+-      mac80211_hwsim_mchan_ops.assign_vif_chanctx =
+-              mac80211_hwsim_assign_vif_chanctx;
+-      mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
+-              mac80211_hwsim_unassign_vif_chanctx;
++      pax_open_kernel();
++      memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
++      const_cast(mac80211_hwsim_mchan_ops.hw_scan) = mac80211_hwsim_hw_scan;
++      const_cast(mac80211_hwsim_mchan_ops.cancel_hw_scan) = mac80211_hwsim_cancel_hw_scan;
++      const_cast(mac80211_hwsim_mchan_ops.sw_scan_start) = NULL;
++      const_cast(mac80211_hwsim_mchan_ops.sw_scan_complete) = NULL;
++      const_cast(mac80211_hwsim_mchan_ops.remain_on_channel) = mac80211_hwsim_roc;
++      const_cast(mac80211_hwsim_mchan_ops.cancel_remain_on_channel) = mac80211_hwsim_croc;
++      const_cast(mac80211_hwsim_mchan_ops.add_chanctx) = mac80211_hwsim_add_chanctx;
++      const_cast(mac80211_hwsim_mchan_ops.remove_chanctx) = mac80211_hwsim_remove_chanctx;
++      const_cast(mac80211_hwsim_mchan_ops.change_chanctx) = mac80211_hwsim_change_chanctx;
++      const_cast(mac80211_hwsim_mchan_ops.assign_vif_chanctx) = mac80211_hwsim_assign_vif_chanctx;
++      const_cast(mac80211_hwsim_mchan_ops.unassign_vif_chanctx) = mac80211_hwsim_unassign_vif_chanctx;
++      pax_close_kernel();
+       spin_lock_init(&hwsim_radio_lock);
+       INIT_LIST_HEAD(&hwsim_radios);
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index db4925d..91c12fa 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -814,7 +814,7 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
+ /*
+  * CFG802.11 network device handler for data transmission.
+  */
+-static int
++static netdev_tx_t
+ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+index 155f343..5db43e7 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+@@ -54,7 +54,7 @@
+       rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg))
+ static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev,
+-                              const unsigned int word, const u8 value)
++                              const unsigned int word, u8 value)
+ {
+       u32 reg;
+@@ -109,7 +109,7 @@ static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev,
+-                             const unsigned int word, const u32 value)
++                             const unsigned int word, u32 value)
+ {
+       u32 reg;
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+index 2553cdd..6a60ef9 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+@@ -54,7 +54,7 @@
+       rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg))
+ static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev,
+-                              const unsigned int word, const u8 value)
++                              const unsigned int word, u8 value)
+ {
+       u32 reg;
+@@ -109,7 +109,7 @@ static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev,
+-                             const unsigned int word, const u32 value)
++                             const unsigned int word, u32 value)
+ {
+       u32 reg;
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+index 2d64611..66754f4 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+@@ -142,7 +142,7 @@ static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
+       rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg))
+ static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
+-                              const unsigned int word, const u8 value)
++                              const unsigned int word, u8 value)
+ {
+       u16 reg;
+@@ -196,7 +196,7 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
+-                             const unsigned int word, const u32 value)
++                             const unsigned int word, u32 value)
+ {
+       u16 reg;
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index bf3f0a3..9d2a6d0 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -83,7 +83,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
+ }
+ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
+-                           const unsigned int word, const u8 value)
++                           const unsigned int word, u8 value)
+ {
+       u32 reg;
+@@ -140,7 +140,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
+-                             const unsigned int word, const u8 value)
++                             const unsigned int word, u8 value)
+ {
+       u32 reg;
+@@ -195,7 +195,7 @@ static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
+-                          const unsigned int word, const u32 value)
++                          const unsigned int word, u32 value)
+ {
+       u32 reg;
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+index f68d492..38ba52d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+@@ -378,7 +378,7 @@ struct rt2x00_intf {
+        * for hardware which doesn't support hardware
+        * sequence counting.
+        */
+-      atomic_t seqno;
++      atomic_unchecked_t seqno;
+ };
+ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+index 68b620b..92ecd9e 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+        * sequence counter given by mac80211.
+        */
+       if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+-              seqno = atomic_add_return(0x10, &intf->seqno);
++              seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
+       else
+-              seqno = atomic_read(&intf->seqno);
++              seqno = atomic_read_unchecked(&intf->seqno);
+       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+       hdr->seq_ctrl |= cpu_to_le16(seqno);
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+index 03013eb..ade7027 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+@@ -63,7 +63,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+                               H2M_MAILBOX_CSR_OWNER, (__reg))
+ static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev,
+-                            const unsigned int word, const u8 value)
++                            const unsigned int word, u8 value)
+ {
+       u32 reg;
+@@ -118,7 +118,7 @@ static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev,
+-                           const unsigned int word, const u32 value)
++                           const unsigned int word, u32 value)
+ {
+       u32 reg;
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+index c1397a6..82c223d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+@@ -61,7 +61,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+       rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg))
+ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,
+-                            const unsigned int word, const u8 value)
++                            const unsigned int word, u8 value)
+ {
+       u32 reg;
+@@ -116,7 +116,7 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
+ }
+ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
+-                           const unsigned int word, const u32 value)
++                           const unsigned int word, u32 value)
+ {
+       u32 reg;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
+index 264466f..ab69236 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.c
++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
+@@ -467,15 +467,15 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+       rtlpriv->works.hw = hw;
+       rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+       INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+-                        (void *)rtl_watchdog_wq_callback);
++                        rtl_watchdog_wq_callback);
+       INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
+-                        (void *)rtl_ips_nic_off_wq_callback);
++                        rtl_ips_nic_off_wq_callback);
+       INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
+-                        (void *)rtl_swlps_wq_callback);
++                        rtl_swlps_wq_callback);
+       INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
+-                        (void *)rtl_swlps_rfon_wq_callback);
++                        rtl_swlps_rfon_wq_callback);
+       INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
+-                        (void *)rtl_fwevt_wq_callback);
++                        rtl_fwevt_wq_callback);
+ }
+@@ -1559,7 +1559,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
+-void rtl_watchdog_wq_callback(void *data)
++void rtl_watchdog_wq_callback(struct work_struct *data)
+ {
+       struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+                                                           struct rtl_works,
+@@ -1722,7 +1722,7 @@ void rtl_watch_dog_timer_callback(unsigned long data)
+       mod_timer(&rtlpriv->works.watchdog_timer,
+                 jiffies + MSECS(RTL_WATCH_DOG_TIME));
+ }
+-void rtl_fwevt_wq_callback(void *data)
++void rtl_fwevt_wq_callback(struct work_struct *data)
+ {
+       struct rtl_works *rtlworks =
+               container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
+index 74233d6..482e495 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.h
++++ b/drivers/net/wireless/realtek/rtlwifi/base.h
+@@ -134,8 +134,8 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
+                    struct ieee80211_sta *sta, u16 tid);
+ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+                   struct ieee80211_sta *sta, u16 tid);
+-void rtl_watchdog_wq_callback(void *data);
+-void rtl_fwevt_wq_callback(void *data);
++void rtl_watchdog_wq_callback(struct work_struct *data);
++void rtl_fwevt_wq_callback(struct work_struct *data);
+ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+                     struct ieee80211_tx_info *info,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index d12586d..d6f3388 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -1098,13 +1098,16 @@ done:
+       return ret;
+ }
+-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
++static void _rtl_pci_irq_tasklet(unsigned long _hw)
+ {
++      struct ieee80211_hw *hw = (struct ieee80211_hw *)_hw;
++
+       _rtl_pci_tx_chk_waitq(hw);
+ }
+-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
++static void _rtl_pci_prepare_bcn_tasklet(unsigned long _hw)
+ {
++      struct ieee80211_hw *hw = (struct ieee80211_hw *)_hw;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+@@ -1225,12 +1228,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
+       rtlpci->acm_method = EACMWAY2_SW;
+       /*task */
+-      tasklet_init(&rtlpriv->works.irq_tasklet,
+-                   (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+-                   (unsigned long)hw);
+-      tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
+-                   (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+-                   (unsigned long)hw);
++      tasklet_init(&rtlpriv->works.irq_tasklet, _rtl_pci_irq_tasklet, (unsigned long)hw);
++      tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, _rtl_pci_prepare_bcn_tasklet, (unsigned long)hw);
+       INIT_WORK(&rtlpriv->works.lps_change_work,
+                 rtl_lps_change_work_callback);
+ }
+diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
+index 9a64f9b..a7728e9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
++++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
+@@ -198,7 +198,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
+       ppsc->swrf_processing = false;
+ }
+-void rtl_ips_nic_off_wq_callback(void *data)
++void rtl_ips_nic_off_wq_callback(struct work_struct *data)
+ {
+       struct rtl_works *rtlworks =
+           container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+@@ -584,7 +584,7 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
+       spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ }
+-void rtl_swlps_rfon_wq_callback(void *data)
++void rtl_swlps_rfon_wq_callback(struct work_struct *data)
+ {
+       struct rtl_works *rtlworks =
+           container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
+@@ -676,7 +676,7 @@ void rtl_lps_change_work_callback(struct work_struct *work)
+ }
+ EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
+-void rtl_swlps_wq_callback(void *data)
++void rtl_swlps_wq_callback(struct work_struct *data)
+ {
+       struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+                                    struct rtl_works,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
+index 0df2b52..0607d33 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/ps.h
++++ b/drivers/net/wireless/realtek/rtlwifi/ps.h
+@@ -32,15 +32,15 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
+ bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
+ void rtl_ips_nic_off(struct ieee80211_hw *hw);
+ void rtl_ips_nic_on(struct ieee80211_hw *hw);
+-void rtl_ips_nic_off_wq_callback(void *data);
++void rtl_ips_nic_off_wq_callback(struct work_struct *data);
+ void rtl_lps_enter(struct ieee80211_hw *hw);
+ void rtl_lps_leave(struct ieee80211_hw *hw);
+ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
+ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
+-void rtl_swlps_wq_callback(void *data);
+-void rtl_swlps_rfon_wq_callback(void *data);
++void rtl_swlps_wq_callback(struct work_struct *data);
++void rtl_swlps_rfon_wq_callback(struct work_struct *data);
+ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
+ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
+ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state);
+diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
+index b661f896..ebea675 100644
+--- a/drivers/net/wireless/ti/wl1251/sdio.c
++++ b/drivers/net/wireless/ti/wl1251/sdio.c
+@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+               irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+-              wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+-              wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
++              pax_open_kernel();
++              const_cast(wl1251_sdio_ops.enable_irq) = wl1251_enable_line_irq;
++              const_cast(wl1251_sdio_ops.disable_irq) = wl1251_disable_line_irq;
++              pax_close_kernel();
+               wl1251_info("using dedicated interrupt line");
+       } else {
+-              wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+-              wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
++              pax_open_kernel();
++              const_cast(wl1251_sdio_ops.enable_irq) = wl1251_sdio_enable_irq;
++              const_cast(wl1251_sdio_ops.disable_irq) = wl1251_sdio_disable_irq;
++              pax_close_kernel();
+               wl1251_info("using SDIO interrupt");
+       }
+diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
+index 22009e1..2e5e0c1 100644
+--- a/drivers/net/wireless/ti/wl12xx/main.c
++++ b/drivers/net/wireless/ti/wl12xx/main.c
+@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+                      sizeof(wl->conf.mem));
+               /* read data preparation is only needed by wl127x */
+-              wl->ops->prepare_read = wl127x_prepare_read;
++              pax_open_kernel();
++              const_cast(wl->ops->prepare_read) = wl127x_prepare_read;
++              pax_close_kernel();
+               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+                             WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
+@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+                      sizeof(wl->conf.mem));
+               /* read data preparation is only needed by wl127x */
+-              wl->ops->prepare_read = wl127x_prepare_read;
++              pax_open_kernel();
++              const_cast(wl->ops->prepare_read) = wl127x_prepare_read;
++              pax_close_kernel();
+               wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
+                             WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
+diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
+index 00a04df..859b311 100644
+--- a/drivers/net/wireless/ti/wl18xx/main.c
++++ b/drivers/net/wireless/ti/wl18xx/main.c
+@@ -2031,8 +2031,10 @@ static int wl18xx_setup(struct wl1271 *wl)
+       }
+       if (!checksum_param) {
+-              wl18xx_ops.set_rx_csum = NULL;
+-              wl18xx_ops.init_vif = NULL;
++              pax_open_kernel();
++              const_cast(wl18xx_ops.set_rx_csum) = NULL;
++              const_cast(wl18xx_ops.init_vif) = NULL;
++              pax_close_kernel();
+       }
+       /* Enable 11a Band only if we have 5G antennas */
+diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
+index dea049b..ef3bcdd 100644
+--- a/drivers/net/wireless/zydas/zd1201.c
++++ b/drivers/net/wireless/zydas/zd1201.c
+@@ -891,7 +891,7 @@ static void zd1201_set_multicast(struct net_device *dev)
+ }
+ static int zd1201_config_commit(struct net_device *dev, 
+-    struct iw_request_info *info, struct iw_point *data, char *essid)
++    struct iw_request_info *info, union iwreq_data *data, char *essid)
+ {
+       struct zd1201 *zd = netdev_priv(dev);
+@@ -899,15 +899,18 @@ static int zd1201_config_commit(struct net_device *dev,
+ }
+ static int zd1201_get_name(struct net_device *dev,
+-    struct iw_request_info *info, char *name, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      char *name = wrqu->name;
++
+       strcpy(name, "IEEE 802.11b");
+       return 0;
+ }
+ static int zd1201_set_freq(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_freq *freq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_freq *freq = &wrqu->freq;
+       struct zd1201 *zd = netdev_priv(dev);
+       short channel = 0;
+       int err;
+@@ -927,8 +930,9 @@ static int zd1201_set_freq(struct net_device *dev,
+ }
+ static int zd1201_get_freq(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_freq *freq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_freq *freq = &wrqu->freq;
+       struct zd1201 *zd = netdev_priv(dev);
+       short channel;
+       int err;
+@@ -943,8 +947,9 @@ static int zd1201_get_freq(struct net_device *dev,
+ }
+ static int zd1201_set_mode(struct net_device *dev,
+-    struct iw_request_info *info, __u32 *mode, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      __u32 *mode = &wrqu->mode;
+       struct zd1201 *zd = netdev_priv(dev);
+       short porttype, monitor = 0;
+       unsigned char buffer[IW_ESSID_MAX_SIZE+2];
+@@ -1005,8 +1010,9 @@ static int zd1201_set_mode(struct net_device *dev,
+ }
+ static int zd1201_get_mode(struct net_device *dev,
+-    struct iw_request_info *info, __u32 *mode, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      __u32 *mode = &wrqu->mode;
+       struct zd1201 *zd = netdev_priv(dev);
+       short porttype;
+       int err;
+@@ -1042,8 +1048,9 @@ static int zd1201_get_mode(struct net_device *dev,
+ }
+ static int zd1201_get_range(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *wrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *wrq = &wrqu->data;
+       struct iw_range *range = (struct iw_range *)extra;
+       wrq->length = sizeof(struct iw_range);
+@@ -1081,8 +1088,9 @@ static int zd1201_get_range(struct net_device *dev,
+  *    the stats after asking the bssid.
+  */
+ static int zd1201_get_wap(struct net_device *dev,
+-    struct iw_request_info *info, struct sockaddr *ap_addr, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct sockaddr *ap_addr = &wrqu->ap_addr;
+       struct zd1201 *zd = netdev_priv(dev);
+       unsigned char buffer[6];
+@@ -1102,15 +1110,16 @@ static int zd1201_get_wap(struct net_device *dev,
+ }
+ static int zd1201_set_scan(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *srq, char *extra)
++    struct iw_request_info *info, union iwreq_data *srq, char *extra)
+ {
+       /* We do everything in get_scan */
+       return 0;
+ }
+ static int zd1201_get_scan(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *srq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_point *srq = &wrqu->data;
+       struct zd1201 *zd = netdev_priv(dev);
+       int err, i, j, enabled_save;
+       struct iw_event iwe;
+@@ -1201,8 +1210,9 @@ static int zd1201_get_scan(struct net_device *dev,
+ }
+ static int zd1201_set_essid(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *data, char *essid)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
+ {
++      struct iw_point *data = &wrqu->essid;
+       struct zd1201 *zd = netdev_priv(dev);
+       if (data->length > IW_ESSID_MAX_SIZE)
+@@ -1216,8 +1226,9 @@ static int zd1201_set_essid(struct net_device *dev,
+ }
+ static int zd1201_get_essid(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *data, char *essid)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
+ {
++      struct iw_point *data = &wrqu->essid;
+       struct zd1201 *zd = netdev_priv(dev);
+       memcpy(essid, zd->essid, zd->essidlen);
+@@ -1228,8 +1239,10 @@ static int zd1201_get_essid(struct net_device *dev,
+ }
+ static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
+-    struct iw_point *data, char *nick)
++    union iwreq_data *wrqu, char *nick)
+ {
++      struct iw_point *data = &wrqu->data;
++
+       strcpy(nick, "zd1201");
+       data->flags = 1;
+       data->length = strlen(nick);
+@@ -1237,8 +1250,9 @@ static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
+ }
+ static int zd1201_set_rate(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct zd1201 *zd = netdev_priv(dev);
+       short rate;
+       int err;
+@@ -1270,8 +1284,9 @@ static int zd1201_set_rate(struct net_device *dev,
+ }
+ static int zd1201_get_rate(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->bitrate;
+       struct zd1201 *zd = netdev_priv(dev);
+       short rate;
+       int err;
+@@ -1303,8 +1318,9 @@ static int zd1201_get_rate(struct net_device *dev,
+ }
+ static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
+-    struct iw_param *rts, char *extra)
++    union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rts = &wrqu->rts;
+       struct zd1201 *zd = netdev_priv(dev);
+       int err;
+       short val = rts->value;
+@@ -1323,8 +1339,9 @@ static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
+ }
+ static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
+-    struct iw_param *rts, char *extra)
++    union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rts = &wrqu->rts;
+       struct zd1201 *zd = netdev_priv(dev);
+       short rtst;
+       int err;
+@@ -1340,8 +1357,9 @@ static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
+ }
+ static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
+-    struct iw_param *frag, char *extra)
++    union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *frag = &wrqu->frag;
+       struct zd1201 *zd = netdev_priv(dev);
+       int err;
+       short val = frag->value;
+@@ -1361,8 +1379,9 @@ static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
+ }
+ static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
+-    struct iw_param *frag, char *extra)
++    union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *frag = &wrqu->frag;
+       struct zd1201 *zd = netdev_priv(dev);
+       short fragt;
+       int err;
+@@ -1378,20 +1397,21 @@ static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
+ }
+ static int zd1201_set_retry(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *rrq, char *extra)
+ {
+       return 0;
+ }
+ static int zd1201_get_retry(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *rrq, char *extra)
+ {
+       return 0;
+ }
+ static int zd1201_set_encode(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *erq, char *key)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *key)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct zd1201 *zd = netdev_priv(dev);
+       short i;
+       int err, rid;
+@@ -1447,8 +1467,9 @@ static int zd1201_set_encode(struct net_device *dev,
+ }
+ static int zd1201_get_encode(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_point *erq, char *key)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *key)
+ {
++      struct iw_point *erq = &wrqu->encoding;
+       struct zd1201 *zd = netdev_priv(dev);
+       short i;
+       int err;
+@@ -1480,8 +1501,9 @@ static int zd1201_get_encode(struct net_device *dev,
+ }
+ static int zd1201_set_power(struct net_device *dev, 
+-    struct iw_request_info *info, struct iw_param *vwrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->power;
+       struct zd1201 *zd = netdev_priv(dev);
+       short enabled, duration, level;
+       int err;
+@@ -1519,8 +1541,9 @@ out:
+ }
+ static int zd1201_get_power(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *vwrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *vwrq = &wrqu->power;
+       struct zd1201 *zd = netdev_priv(dev);
+       short enabled, level, duration;
+       int err;
+@@ -1557,57 +1580,58 @@ static int zd1201_get_power(struct net_device *dev,
+ static const iw_handler zd1201_iw_handler[] =
+ {
+-      (iw_handler) zd1201_config_commit,      /* SIOCSIWCOMMIT */
+-      (iw_handler) zd1201_get_name,           /* SIOCGIWNAME */
+-      (iw_handler) NULL,                      /* SIOCSIWNWID */
+-      (iw_handler) NULL,                      /* SIOCGIWNWID */
+-      (iw_handler) zd1201_set_freq,           /* SIOCSIWFREQ */
+-      (iw_handler) zd1201_get_freq,           /* SIOCGIWFREQ */
+-      (iw_handler) zd1201_set_mode,           /* SIOCSIWMODE */
+-      (iw_handler) zd1201_get_mode,           /* SIOCGIWMODE */
+-      (iw_handler) NULL,                      /* SIOCSIWSENS */
+-      (iw_handler) NULL,                      /* SIOCGIWSENS */
+-      (iw_handler) NULL,                      /* SIOCSIWRANGE */
+-      (iw_handler) zd1201_get_range,           /* SIOCGIWRANGE */
+-      (iw_handler) NULL,                      /* SIOCSIWPRIV */
+-      (iw_handler) NULL,                      /* SIOCGIWPRIV */
+-      (iw_handler) NULL,                      /* SIOCSIWSTATS */
+-      (iw_handler) NULL,                      /* SIOCGIWSTATS */
+-      (iw_handler) NULL,                      /* SIOCSIWSPY */
+-      (iw_handler) NULL,                      /* SIOCGIWSPY */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL/*zd1201_set_wap*/,            /* SIOCSIWAP */
+-      (iw_handler) zd1201_get_wap,            /* SIOCGIWAP */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* SIOCGIWAPLIST */
+-      (iw_handler) zd1201_set_scan,           /* SIOCSIWSCAN */
+-      (iw_handler) zd1201_get_scan,           /* SIOCGIWSCAN */
+-      (iw_handler) zd1201_set_essid,          /* SIOCSIWESSID */
+-      (iw_handler) zd1201_get_essid,          /* SIOCGIWESSID */
+-      (iw_handler) NULL,                      /* SIOCSIWNICKN */
+-      (iw_handler) zd1201_get_nick,           /* SIOCGIWNICKN */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) NULL,                      /* -- hole -- */
+-      (iw_handler) zd1201_set_rate,           /* SIOCSIWRATE */
+-      (iw_handler) zd1201_get_rate,           /* SIOCGIWRATE */
+-      (iw_handler) zd1201_set_rts,            /* SIOCSIWRTS */
+-      (iw_handler) zd1201_get_rts,            /* SIOCGIWRTS */
+-      (iw_handler) zd1201_set_frag,           /* SIOCSIWFRAG */
+-      (iw_handler) zd1201_get_frag,           /* SIOCGIWFRAG */
+-      (iw_handler) NULL,                      /* SIOCSIWTXPOW */
+-      (iw_handler) NULL,                      /* SIOCGIWTXPOW */
+-      (iw_handler) zd1201_set_retry,          /* SIOCSIWRETRY */
+-      (iw_handler) zd1201_get_retry,          /* SIOCGIWRETRY */
+-      (iw_handler) zd1201_set_encode,         /* SIOCSIWENCODE */
+-      (iw_handler) zd1201_get_encode,         /* SIOCGIWENCODE */
+-      (iw_handler) zd1201_set_power,          /* SIOCSIWPOWER */
+-      (iw_handler) zd1201_get_power,          /* SIOCGIWPOWER */
++      zd1201_config_commit,   /* SIOCSIWCOMMIT */
++      zd1201_get_name,        /* SIOCGIWNAME */
++      NULL,                   /* SIOCSIWNWID */
++      NULL,                   /* SIOCGIWNWID */
++      zd1201_set_freq,        /* SIOCSIWFREQ */
++      zd1201_get_freq,        /* SIOCGIWFREQ */
++      zd1201_set_mode,        /* SIOCSIWMODE */
++      zd1201_get_mode,        /* SIOCGIWMODE */
++      NULL,                   /* SIOCSIWSENS */
++      NULL,                   /* SIOCGIWSENS */
++      NULL,                   /* SIOCSIWRANGE */
++      zd1201_get_range,       /* SIOCGIWRANGE */
++      NULL,                   /* SIOCSIWPRIV */
++      NULL,                   /* SIOCGIWPRIV */
++      NULL,                   /* SIOCSIWSTATS */
++      NULL,                   /* SIOCGIWSTATS */
++      NULL,                   /* SIOCSIWSPY */
++      NULL,                   /* SIOCGIWSPY */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      NULL/*zd1201_set_wap*/, /* SIOCSIWAP */
++      zd1201_get_wap,         /* SIOCGIWAP */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* SIOCGIWAPLIST */
++      zd1201_set_scan,        /* SIOCSIWSCAN */
++      zd1201_get_scan,        /* SIOCGIWSCAN */
++      zd1201_set_essid,       /* SIOCSIWESSID */
++      zd1201_get_essid,       /* SIOCGIWESSID */
++      NULL,                   /* SIOCSIWNICKN */
++      zd1201_get_nick,        /* SIOCGIWNICKN */
++      NULL,                   /* -- hole -- */
++      NULL,                   /* -- hole -- */
++      zd1201_set_rate,        /* SIOCSIWRATE */
++      zd1201_get_rate,        /* SIOCGIWRATE */
++      zd1201_set_rts,         /* SIOCSIWRTS */
++      zd1201_get_rts,         /* SIOCGIWRTS */
++      zd1201_set_frag,        /* SIOCSIWFRAG */
++      zd1201_get_frag,        /* SIOCGIWFRAG */
++      NULL,                   /* SIOCSIWTXPOW */
++      NULL,                   /* SIOCGIWTXPOW */
++      zd1201_set_retry,       /* SIOCSIWRETRY */
++      zd1201_get_retry,       /* SIOCGIWRETRY */
++      zd1201_set_encode,      /* SIOCSIWENCODE */
++      zd1201_get_encode,      /* SIOCGIWENCODE */
++      zd1201_set_power,       /* SIOCSIWPOWER */
++      zd1201_get_power,       /* SIOCGIWPOWER */
+ };
+ static int zd1201_set_hostauth(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->param;
+       struct zd1201 *zd = netdev_priv(dev);
+       if (!zd->ap)
+@@ -1617,8 +1641,9 @@ static int zd1201_set_hostauth(struct net_device *dev,
+ }
+ static int zd1201_get_hostauth(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->param;
+       struct zd1201 *zd = netdev_priv(dev);
+       short hostauth;
+       int err;
+@@ -1636,8 +1661,9 @@ static int zd1201_get_hostauth(struct net_device *dev,
+ }
+ static int zd1201_auth_sta(struct net_device *dev,
+-    struct iw_request_info *info, struct sockaddr *sta, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct sockaddr *sta = &wrqu->addr;
+       struct zd1201 *zd = netdev_priv(dev);
+       unsigned char buffer[10];
+@@ -1652,8 +1678,9 @@ static int zd1201_auth_sta(struct net_device *dev,
+ }
+ static int zd1201_set_maxassoc(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->param;
+       struct zd1201 *zd = netdev_priv(dev);
+       int err;
+@@ -1667,8 +1694,9 @@ static int zd1201_set_maxassoc(struct net_device *dev,
+ }
+ static int zd1201_get_maxassoc(struct net_device *dev,
+-    struct iw_request_info *info, struct iw_param *rrq, char *extra)
++    struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+ {
++      struct iw_param *rrq = &wrqu->param;
+       struct zd1201 *zd = netdev_priv(dev);
+       short maxassoc;
+       int err;
+@@ -1686,12 +1714,12 @@ static int zd1201_get_maxassoc(struct net_device *dev,
+ }
+ static const iw_handler zd1201_private_handler[] = {
+-      (iw_handler) zd1201_set_hostauth,       /* ZD1201SIWHOSTAUTH */
+-      (iw_handler) zd1201_get_hostauth,       /* ZD1201GIWHOSTAUTH */
+-      (iw_handler) zd1201_auth_sta,           /* ZD1201SIWAUTHSTA */
+-      (iw_handler) NULL,                      /* nothing to get */
+-      (iw_handler) zd1201_set_maxassoc,       /* ZD1201SIMAXASSOC */
+-      (iw_handler) zd1201_get_maxassoc,       /* ZD1201GIMAXASSOC */
++      zd1201_set_hostauth,    /* ZD1201SIWHOSTAUTH */
++      zd1201_get_hostauth,    /* ZD1201GIWHOSTAUTH */
++      zd1201_auth_sta,        /* ZD1201SIWAUTHSTA */
++      NULL,                   /* nothing to get */
++      zd1201_set_maxassoc,    /* ZD1201SIMAXASSOC */
++      zd1201_get_maxassoc,    /* ZD1201GIMAXASSOC */
+ };
+ static const struct iw_priv_args zd1201_private_args[] = {
+diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+index a912dc0..a8225ba 100644
+--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
++++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
+ {
+       struct zd_usb *usb = urb->context;
+       struct zd_usb_interrupt *intr = &usb->intr;
+-      int len;
++      unsigned int len;
+       u16 int_num;
+       ZD_ASSERT(in_interrupt());
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 83deeeb..bbc8855 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -178,7 +178,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+       return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+ }
+-static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct xenvif *vif = netdev_priv(dev);
+       struct xenvif_queue *queue = NULL;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 96ccd4e..8e1c6b7 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -550,7 +550,7 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
+-static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct netfront_info *np = netdev_priv(dev);
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
+index 7d31179..a188713 100644
+--- a/drivers/ntb/test/ntb_pingpong.c
++++ b/drivers/ntb/test/ntb_pingpong.c
+@@ -99,7 +99,7 @@ struct pp_ctx {
+       unsigned long                   db_delay;
+       struct dentry                   *debugfs_node_dir;
+       struct dentry                   *debugfs_count;
+-      atomic_t                        count;
++      atomic_unchecked_t              count;
+ };
+ static struct dentry *pp_debugfs_dir;
+@@ -177,7 +177,7 @@ static void pp_db_event(void *ctx, int vec)
+               dev_dbg(&pp->ntb->dev,
+                       "Pong vec %d bits %#llx\n",
+                       vec, db_bits);
+-              atomic_inc(&pp->count);
++              atomic_inc_unchecked(&pp->count);
+       }
+       spin_unlock_irqrestore(&pp->db_lock, irqflags);
+ }
+@@ -194,7 +194,7 @@ static int pp_debugfs_setup(struct pp_ctx *pp)
+       if (!pp->debugfs_node_dir)
+               return -ENODEV;
+-      pp->debugfs_count = debugfs_create_atomic_t("count", S_IRUSR | S_IWUSR,
++      pp->debugfs_count = debugfs_create_atomic_unchecked_t("count", S_IRUSR | S_IWUSR,
+                                                   pp->debugfs_node_dir,
+                                                   &pp->count);
+       if (!pp->debugfs_count)
+@@ -238,7 +238,7 @@ static int pp_probe(struct ntb_client *client,
+       pp->ntb = ntb;
+       pp->db_bits = 0;
+-      atomic_set(&pp->count, 0);
++      atomic_set_unchecked(&pp->count, 0);
+       spin_lock_init(&pp->db_lock);
+       setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+       pp->db_delay = msecs_to_jiffies(delay_ms);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 60f7eab..1e905da 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2053,7 +2053,7 @@ static int nvme_resume(struct device *dev)
+ static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
+ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 085c638..1819bbe 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1304,7 +1304,9 @@ static int __init of_fdt_raw_init(void)
+               pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
+               return 0;
+       }
+-      of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
++      pax_open_kernel();
++      const_cast(of_fdt_raw_attr.size) = fdt_totalsize(initial_boot_params);
++      pax_close_kernel();
+       return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
+ }
+ late_initcall(of_fdt_raw_init);
+diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
+index 82f7000..d6d0447 100644
+--- a/drivers/oprofile/buffer_sync.c
++++ b/drivers/oprofile/buffer_sync.c
+@@ -345,7 +345,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
+               if (cookie == NO_COOKIE)
+                       offset = pc;
+               if (cookie == INVALID_COOKIE) {
+-                      atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++                      atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+                       offset = pc;
+               }
+               if (cookie != last_cookie) {
+@@ -389,14 +389,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
+       /* add userspace sample */
+       if (!mm) {
+-              atomic_inc(&oprofile_stats.sample_lost_no_mm);
++              atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
+               return 0;
+       }
+       cookie = lookup_dcookie(mm, s->eip, &offset);
+       if (cookie == INVALID_COOKIE) {
+-              atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++              atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+               return 0;
+       }
+@@ -554,7 +554,7 @@ void sync_buffer(int cpu)
+               /* ignore backtraces if failed to add a sample */
+               if (state == sb_bt_start) {
+                       state = sb_bt_ignore;
+-                      atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++                      atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
+               }
+       }
+       release_mm(mm);
+diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
+index c0cc4e7..44d4e54 100644
+--- a/drivers/oprofile/event_buffer.c
++++ b/drivers/oprofile/event_buffer.c
+@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
+       }
+       if (buffer_pos == buffer_size) {
+-              atomic_inc(&oprofile_stats.event_lost_overflow);
++              atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
+               return;
+       }
+diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
+index ed2c3ec..deda85a 100644
+--- a/drivers/oprofile/oprof.c
++++ b/drivers/oprofile/oprof.c
+@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
+       if (oprofile_ops.switch_events())
+               return;
+-      atomic_inc(&oprofile_stats.multiplex_counter);
++      atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
+       start_switch_worker();
+ }
+diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
+index 59659ce..6c860a0 100644
+--- a/drivers/oprofile/oprofile_stats.c
++++ b/drivers/oprofile/oprofile_stats.c
+@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
+               cpu_buf->sample_invalid_eip = 0;
+       }
+-      atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+-      atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+-      atomic_set(&oprofile_stats.event_lost_overflow, 0);
+-      atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+-      atomic_set(&oprofile_stats.multiplex_counter, 0);
++      atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
++      atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
++      atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
++      atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
++      atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
+ }
+diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
+index 1fc622b..8c48fc3 100644
+--- a/drivers/oprofile/oprofile_stats.h
++++ b/drivers/oprofile/oprofile_stats.h
+@@ -13,11 +13,11 @@
+ #include <linux/atomic.h>
+ struct oprofile_stat_struct {
+-      atomic_t sample_lost_no_mm;
+-      atomic_t sample_lost_no_mapping;
+-      atomic_t bt_lost_no_mapping;
+-      atomic_t event_lost_overflow;
+-      atomic_t multiplex_counter;
++      atomic_unchecked_t sample_lost_no_mm;
++      atomic_unchecked_t sample_lost_no_mapping;
++      atomic_unchecked_t bt_lost_no_mapping;
++      atomic_unchecked_t event_lost_overflow;
++      atomic_unchecked_t multiplex_counter;
+ };
+ extern struct oprofile_stat_struct oprofile_stats;
+diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
+index a0e5260..a6d7637 100644
+--- a/drivers/oprofile/oprofilefs.c
++++ b/drivers/oprofile/oprofilefs.c
+@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
+ static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+ {
+-      atomic_t *val = file->private_data;
+-      return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
++      atomic_unchecked_t *val = file->private_data;
++      return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
+ }
+@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
+ int oprofilefs_create_ro_atomic(struct dentry *root,
+-      char const *name, atomic_t *val)
++      char const *name, atomic_unchecked_t *val)
+ {
+       return __oprofilefs_create_file(root, name,
+                                       &atomic_ro_fops, 0444, val);
+diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
+index bdef916..88c7dee 100644
+--- a/drivers/oprofile/timer_int.c
++++ b/drivers/oprofile/timer_int.c
+@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block __refdata oprofile_cpu_notifier = {
++static struct notifier_block oprofile_cpu_notifier = {
+       .notifier_call = oprofile_cpu_notify,
+ };
+diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
+index 74ed3e4..3e74a1c 100644
+--- a/drivers/parport/procfs.c
++++ b/drivers/parport/procfs.c
+@@ -65,7 +65,7 @@ static int do_active_device(struct ctl_table *table, int write,
+       *ppos += len;
+-      return copy_to_user(result, buffer, len) ? -EFAULT : 0;
++      return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
+ }
+ #ifdef CONFIG_PARPORT_1284
+@@ -107,7 +107,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
+       *ppos += len;
+-      return copy_to_user (result, buffer, len) ? -EFAULT : 0;
++      return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
+ }
+ #endif /* IEEE1284.3 support. */
+diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
+index f6221d7..80121ae 100644
+--- a/drivers/pci/hotplug/acpiphp_ibm.c
++++ b/drivers/pci/hotplug/acpiphp_ibm.c
+@@ -465,7 +465,9 @@ static int __init ibm_acpiphp_init(void)
+               goto init_cleanup;
+       }
+-      ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
++      pax_open_kernel();
++      const_cast(ibm_apci_table_attr.size) = ibm_get_table_from_acpi(NULL);
++      pax_close_kernel();
+       retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
+       return retval;
+diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
+index 88a44a7..de358ce 100644
+--- a/drivers/pci/hotplug/cpcihp_generic.c
++++ b/drivers/pci/hotplug/cpcihp_generic.c
+@@ -73,7 +73,6 @@ static u16 port;
+ static unsigned int enum_bit;
+ static u8 enum_mask;
+-static struct cpci_hp_controller_ops generic_hpc_ops;
+ static struct cpci_hp_controller generic_hpc;
+ static int __init validate_parameters(void)
+@@ -139,6 +138,10 @@ static int query_enum(void)
+       return ((value & enum_mask) == enum_mask);
+ }
++static struct cpci_hp_controller_ops generic_hpc_ops = {
++      .query_enum = query_enum,
++};
++
+ static int __init cpcihp_generic_init(void)
+ {
+       int status;
+@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
+       pci_dev_put(dev);
+       memset(&generic_hpc, 0, sizeof(struct cpci_hp_controller));
+-      generic_hpc_ops.query_enum = query_enum;
+       generic_hpc.ops = &generic_hpc_ops;
+       status = cpci_hp_register_controller(&generic_hpc);
+diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
+index 5f49c3f..438f019 100644
+--- a/drivers/pci/hotplug/cpcihp_zt5550.c
++++ b/drivers/pci/hotplug/cpcihp_zt5550.c
+@@ -59,7 +59,6 @@
+ /* local variables */
+ static bool debug;
+ static bool poll;
+-static struct cpci_hp_controller_ops zt5550_hpc_ops;
+ static struct cpci_hp_controller zt5550_hpc;
+ /* Primary cPCI bus bridge device */
+@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
+       return 0;
+ }
++static struct cpci_hp_controller_ops zt5550_hpc_ops = {
++      .query_enum = zt5550_hc_query_enum,
++};
++
+ static int zt5550_hc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+       int status;
+@@ -215,16 +218,17 @@ static int zt5550_hc_init_one(struct pci_dev *pdev, const struct pci_device_id *
+       dbg("returned from zt5550_hc_config");
+       memset(&zt5550_hpc, 0, sizeof(struct cpci_hp_controller));
+-      zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
+       zt5550_hpc.ops = &zt5550_hpc_ops;
+       if (!poll) {
+               zt5550_hpc.irq = hc_dev->irq;
+               zt5550_hpc.irq_flags = IRQF_SHARED;
+               zt5550_hpc.dev_id = hc_dev;
+-              zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+-              zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+-              zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++              pax_open_kernel();
++              const_cast(zt5550_hpc_ops.enable_irq) = zt5550_hc_enable_irq;
++              const_cast(zt5550_hpc_ops.disable_irq) = zt5550_hc_disable_irq;
++              const_cast(zt5550_hpc_ops.check_irq) = zt5550_hc_check_irq;
++              pax_close_kernel();
+       } else {
+               info("using ENUM# polling mode");
+       }
+diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
+index c25fc90..b054774 100644
+--- a/drivers/pci/hotplug/cpqphp_nvram.c
++++ b/drivers/pci/hotplug/cpqphp_nvram.c
+@@ -425,8 +425,10 @@ static u32 store_HRT(void __iomem *rom_start)
+ void compaq_nvram_init(void __iomem *rom_start)
+ {
++#ifndef CONFIG_PAX_KERNEXEC
+       if (rom_start)
+               compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
++#endif
+       dbg("int15 entry  = %p\n", compaq_int15_entry_point);
+diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
+index 9acd199..1b19f5b 100644
+--- a/drivers/pci/hotplug/pci_hotplug_core.c
++++ b/drivers/pci/hotplug/pci_hotplug_core.c
+@@ -434,8 +434,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+               return -EINVAL;
+       }
+-      slot->ops->owner = owner;
+-      slot->ops->mod_name = mod_name;
++      pax_open_kernel();
++      const_cast(slot->ops->owner) = owner;
++      const_cast(slot->ops->mod_name) = mod_name;
++      pax_close_kernel();
+       mutex_lock(&pci_hp_mutex);
+       /*
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index ac531e6..716d058 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -87,7 +87,7 @@ static int init_slot(struct controller *ctrl)
+       struct slot *slot = ctrl->slot;
+       struct hotplug_slot *hotplug = NULL;
+       struct hotplug_slot_info *info = NULL;
+-      struct hotplug_slot_ops *ops = NULL;
++      hotplug_slot_ops_no_const *ops = NULL;
+       char name[SLOT_NAME_SIZE];
+       int retval = -ENOMEM;
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 98f1222..d57e451 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -476,8 +476,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
+ {
+       struct attribute **msi_attrs;
+       struct attribute *msi_attr;
+-      struct device_attribute *msi_dev_attr;
+-      struct attribute_group *msi_irq_group;
++      device_attribute_no_const *msi_dev_attr;
++      attribute_group_no_const *msi_irq_group;
+       const struct attribute_group **msi_irq_groups;
+       struct msi_desc *entry;
+       int ret = -ENOMEM;
+@@ -539,7 +539,7 @@ error_attrs:
+       count = 0;
+       msi_attr = msi_attrs[count];
+       while (msi_attr) {
+-              msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
++              msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
+               kfree(msi_attr->name);
+               kfree(msi_dev_attr);
+               ++count;
+@@ -1369,12 +1369,14 @@ static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+       if (ops == NULL) {
+               info->ops = &pci_msi_domain_ops_default;
+       } else {
++              pax_open_kernel();
+               if (ops->set_desc == NULL)
+-                      ops->set_desc = pci_msi_domain_set_desc;
++                      const_cast(ops->set_desc) = pci_msi_domain_set_desc;
+               if (ops->msi_check == NULL)
+-                      ops->msi_check = pci_msi_domain_check_cap;
++                      const_cast(ops->msi_check) = pci_msi_domain_check_cap;
+               if (ops->handle_error == NULL)
+-                      ops->handle_error = pci_msi_domain_handle_error;
++                      const_cast(ops->handle_error) = pci_msi_domain_handle_error;
++              pax_close_kernel();
+       }
+ }
+@@ -1383,12 +1385,14 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+       struct irq_chip *chip = info->chip;
+       BUG_ON(!chip);
++      pax_open_kernel();
+       if (!chip->irq_write_msi_msg)
+-              chip->irq_write_msi_msg = pci_msi_domain_write_msg;
++              const_cast(chip->irq_write_msi_msg) = pci_msi_domain_write_msg;
+       if (!chip->irq_mask)
+-              chip->irq_mask = pci_msi_mask_irq;
++              const_cast(chip->irq_mask) = pci_msi_mask_irq;
+       if (!chip->irq_unmask)
+-              chip->irq_unmask = pci_msi_unmask_irq;
++              const_cast(chip->irq_unmask) = pci_msi_unmask_irq;
++      pax_close_kernel();
+ }
+ /**
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index bcd10c7..c7c18bc 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1141,7 +1141,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ {
+       /* allocate attribute structure, piggyback attribute name */
+       int name_len = write_combine ? 13 : 10;
+-      struct bin_attribute *res_attr;
++      bin_attribute_no_const *res_attr;
+       char *res_attr_name;
+       int retval;
+@@ -1321,7 +1321,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
+ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+ {
+       int retval;
+-      struct bin_attribute *attr;
++      bin_attribute_no_const *attr;
+       /* If the device has VPD, try to expose it in sysfs. */
+       if (dev->vpd) {
+@@ -1368,7 +1368,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
+ {
+       int retval;
+       int rom_size;
+-      struct bin_attribute *attr;
++      bin_attribute_no_const *attr;
+       if (!sysfs_initialized)
+               return -EACCES;
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 9730c47..773a322 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -113,7 +113,7 @@ struct pci_vpd_ops {
+ struct pci_vpd {
+       const struct pci_vpd_ops *ops;
+-      struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
++      bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
+       struct mutex    lock;
+       unsigned int    len;
+       u16             flag;
+@@ -314,7 +314,7 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
+ #endif /* CONFIG_PCI_IOV */
+-unsigned long pci_cardbus_resource_alignment(struct resource *);
++unsigned long pci_cardbus_resource_alignment(const struct resource *);
+ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
+                                                    struct resource *res)
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 0ec649d..f93be68 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -27,9 +27,9 @@
+ #define MODULE_PARAM_PREFIX "pcie_aspm."
+ /* Note: those are not register definitions */
+-#define ASPM_STATE_L0S_UP     (1)     /* Upstream direction L0s state */
+-#define ASPM_STATE_L0S_DW     (2)     /* Downstream direction L0s state */
+-#define ASPM_STATE_L1         (4)     /* L1 state */
++#define ASPM_STATE_L0S_UP     (1U)    /* Upstream direction L0s state */
++#define ASPM_STATE_L0S_DW     (2U)    /* Downstream direction L0s state */
++#define ASPM_STATE_L1         (4U)    /* L1 state */
+ #define ASPM_STATE_L0S                (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+ #define ASPM_STATE_ALL                (ASPM_STATE_L0S | ASPM_STATE_L1)
+@@ -775,7 +775,7 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
+ }
+ EXPORT_SYMBOL(pci_disable_link_state);
+-static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
++static int pcie_aspm_set_policy(const char *val, const struct kernel_param *kp)
+ {
+       int i;
+       struct pcie_link_state *link;
+@@ -802,7 +802,7 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
+       return 0;
+ }
+-static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
++static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
+ {
+       int i, cnt = 0;
+       for (i = 0; i < ARRAY_SIZE(policy_str); i++)
+diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
+index 70d7ad8..66f87d6 100644
+--- a/drivers/pci/pcie/portdrv_pci.c
++++ b/drivers/pci/pcie/portdrv_pci.c
+@@ -370,7 +370,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
++static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
+       /*
+        * Boxes that should not use MSI for PCIe PME signaling.
+        */
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 93f280d..a349035 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -180,7 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+       u16 orig_cmd;
+       struct pci_bus_region region, inverted_region;
+-      mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
++      mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
+       /* No printks while decoding is disabled! */
+       if (!dev->mmio_always_on) {
+diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
+index 2408abe..455d4d4 100644
+--- a/drivers/pci/proc.c
++++ b/drivers/pci/proc.c
+@@ -437,7 +437,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
+ static int __init pci_proc_init(void)
+ {
+       struct pci_dev *dev = NULL;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+       proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
++#endif
+       proc_create("devices", 0, proc_bus_pci_dir,
+                   &proc_bus_pci_dev_operations);
+       proc_initialized = 1;
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index c74059e..95cd7bc 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -405,8 +405,12 @@ static void __assign_resources_sorted(struct list_head *head,
+       /* Update res in head list with add_size in realloc_head list */
+       list_for_each_entry_safe(dev_res, tmp_res, head, list) {
+-              dev_res->res->end += get_res_add_size(realloc_head,
+-                                                      dev_res->res);
++              resource_size_t add_size = get_res_add_size(realloc_head, dev_res->res);
++
++              if (dev_res->res->start == 0 && dev_res->res->end == RESOURCE_SIZE_MAX)
++                      dev_res->res->end = add_size - 1;
++              else
++                      dev_res->res->end += get_res_add_size(realloc_head, dev_res->res);
+               /*
+                * There are two kinds of additional resources in the list:
+@@ -1119,7 +1123,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+       return 0;
+ }
+-unsigned long pci_cardbus_resource_alignment(struct resource *res)
++unsigned long pci_cardbus_resource_alignment(const struct resource *res)
+ {
+       if (res->flags & IORESOURCE_IO)
+               return pci_cardbus_io_size;
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index 35f6218..481d098 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -1098,7 +1098,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
+       struct device_node *np = dev->dev.of_node;
+       struct nmk_gpio_chip *nmk_chip;
+       struct gpio_chip *chip;
+-      struct irq_chip *irqchip;
++      irq_chip_no_const *irqchip;
+       int latent_irq;
+       bool supports_sleepmode;
+       int irq;
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 80daead..388a2c6 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -23,6 +23,7 @@
+ #include <linux/pinctrl/pinmux.h>
+ /* Since we request GPIOs from ourself */
+ #include <linux/pinctrl/consumer.h>
++#include <asm/pgtable.h>
+ #include "pinctrl-at91.h"
+ #include "core.h"
+@@ -1600,7 +1601,9 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
+       at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
+       /* Setup proper .irq_set_type function */
+-      gpio_irqchip.irq_set_type = at91_gpio->ops->irq_type;
++      pax_open_kernel();
++      const_cast(gpio_irqchip.irq_set_type) = at91_gpio->ops->irq_type;
++      pax_close_kernel();
+       /* Disable irqs of this PIO controller */
+       writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
+diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
+index e8a44a9..d859973 100644
+--- a/drivers/platform/chrome/chromeos_laptop.c
++++ b/drivers/platform/chrome/chromeos_laptop.c
+@@ -518,7 +518,7 @@ static struct chromeos_laptop cr48 = {
+       .callback = chromeos_laptop_dmi_matched, \
+       .driver_data = (void *)&board_
+-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
++static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
+       {
+               .ident = "Samsung Series 5 550",
+               .matches = {
+diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
+index 308a853..b0693fd 100644
+--- a/drivers/platform/chrome/chromeos_pstore.c
++++ b/drivers/platform/chrome/chromeos_pstore.c
+@@ -14,7 +14,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pstore_ram.h>
+-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
++static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
+       {
+               /*
+                * Today all Chromebooks/boxes ship with Google_* as version and
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index f9a2454..2759664 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -300,7 +300,7 @@ static int cros_ec_lpc_remove(struct platform_device *pdev)
+       return 0;
+ }
+-static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = {
++static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
+       {
+               /*
+                * Today all Chromebooks/boxes ship with Google_* as version and
+diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
+index 0056294..8f8c2d5 100644
+--- a/drivers/platform/x86/alienware-wmi.c
++++ b/drivers/platform/x86/alienware-wmi.c
+@@ -209,7 +209,7 @@ struct wmax_led_args {
+ } __packed;
+ static struct platform_device *platform_device;
+-static struct device_attribute *zone_dev_attrs;
++static device_attribute_no_const *zone_dev_attrs;
+ static struct attribute **zone_attrs;
+ static struct platform_zone *zone_data;
+@@ -219,7 +219,7 @@ static struct platform_driver platform_driver = {
+                  }
+ };
+-static struct attribute_group zone_attribute_group = {
++static attribute_group_no_const zone_attribute_group = {
+       .name = "rgb_zones",
+ };
+diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
+index a66be13..124be13 100644
+--- a/drivers/platform/x86/apple-gmux.c
++++ b/drivers/platform/x86/apple-gmux.c
+@@ -482,7 +482,7 @@ static int gmux_set_power_state(enum vga_switcheroo_client_id id,
+       return gmux_set_discrete_state(apple_gmux_data, state);
+ }
+-static int gmux_get_client_id(struct pci_dev *pdev)
++static enum vga_switcheroo_client_id gmux_get_client_id(struct pci_dev *pdev)
+ {
+       /*
+        * Early Macbook Pros with switchable graphics use nvidia
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 7c093a0..f2fb59f 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -1872,6 +1872,10 @@ static int show_dsts(struct seq_file *m, void *data)
+       int err;
+       u32 retval = -1;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      return -EPERM;
++#endif
++
+       err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
+       if (err < 0)
+@@ -1888,6 +1892,10 @@ static int show_devs(struct seq_file *m, void *data)
+       int err;
+       u32 retval = -1;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      return -EPERM;
++#endif
++
+       err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
+                                   &retval);
+@@ -1912,6 +1920,10 @@ static int show_call(struct seq_file *m, void *data)
+       union acpi_object *obj;
+       acpi_status status;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      return -EPERM;
++#endif
++
+       status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
+                                    1, asus->debug.method_id,
+                                    &input, &output);
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index e1c2b6d..8f25439 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -805,7 +805,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
+       return 1;
+ }
+-static struct dmi_system_id __initdata compal_dmi_table[] = {
++static const struct dmi_system_id __initconst compal_dmi_table[] = {
+       {
+               .ident = "FL90/IFL90",
+               .matches = {
+diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
+index 458e6c9..089aee7 100644
+--- a/drivers/platform/x86/hdaps.c
++++ b/drivers/platform/x86/hdaps.c
+@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
+    "ThinkPad T42p", so the order of the entries matters.
+    If your ThinkPad is not recognized, please update to latest
+    BIOS. This is especially the case for some R52 ThinkPads. */
+-static struct dmi_system_id __initdata hdaps_whitelist[] = {
++static const struct dmi_system_id __initconst hdaps_whitelist[] = {
+       HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
+       HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
+       HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
+diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
+index c62e5e1..854b418 100644
+--- a/drivers/platform/x86/ibm_rtl.c
++++ b/drivers/platform/x86/ibm_rtl.c
+@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
+ }
+-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
++static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
+       {                                                  \
+               .matches = {                               \
+                       DMI_MATCH(DMI_SYS_VENDOR, "IBM"),  \
+diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
+index 6aa33c4..cfb5425 100644
+--- a/drivers/platform/x86/intel_oaktrail.c
++++ b/drivers/platform/x86/intel_oaktrail.c
+@@ -299,7 +299,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
+       return 0;
+ }
+-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
++static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
+       {
+               .ident = "OakTrail platform",
+               .matches = {
+diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
+index 4231770..cbf93a6 100644
+--- a/drivers/platform/x86/msi-laptop.c
++++ b/drivers/platform/x86/msi-laptop.c
+@@ -605,7 +605,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
+       return 1;
+ }
+-static struct dmi_system_id __initdata msi_dmi_table[] = {
++static const struct dmi_system_id __initconst msi_dmi_table[] = {
+       {
+               .ident = "MSI S270",
+               .matches = {
+@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
+       if (!quirks->ec_read_only) {
+               /* allow userland write sysfs file  */
+-              dev_attr_bluetooth.store = store_bluetooth;
+-              dev_attr_wlan.store = store_wlan;
+-              dev_attr_threeg.store = store_threeg;
+-              dev_attr_bluetooth.attr.mode |= S_IWUSR;
+-              dev_attr_wlan.attr.mode |= S_IWUSR;
+-              dev_attr_threeg.attr.mode |= S_IWUSR;
++              pax_open_kernel();
++              const_cast(dev_attr_bluetooth.store) = store_bluetooth;
++              const_cast(dev_attr_wlan.store) = store_wlan;
++              const_cast(dev_attr_threeg.store) = store_threeg;
++              const_cast(dev_attr_bluetooth.attr.mode) |= S_IWUSR;
++              const_cast(dev_attr_wlan.attr.mode) |= S_IWUSR;
++              const_cast(dev_attr_threeg.attr.mode) |= S_IWUSR;
++              pax_close_kernel();
+       }
+       /* disable hardware control by fn key */
+diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
+index 978e6d6..1f0b37d 100644
+--- a/drivers/platform/x86/msi-wmi.c
++++ b/drivers/platform/x86/msi-wmi.c
+@@ -184,7 +184,7 @@ static const struct backlight_ops msi_backlight_ops = {
+ static void msi_wmi_notify(u32 value, void *context)
+ {
+       struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+-      static struct key_entry *key;
++      struct key_entry *key;
+       union acpi_object *obj;
+       acpi_status status;
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 8c146e2..356c62e 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id __initdata samsung_dmi_table[] = {
++static const struct dmi_system_id __initconst samsung_dmi_table[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR,
+diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
+index e6aac72..e11ff24 100644
+--- a/drivers/platform/x86/samsung-q10.c
++++ b/drivers/platform/x86/samsung-q10.c
+@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
+       return 1;
+ }
+-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
++static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
+       {
+               .ident = "Samsung Q10",
+               .matches = {
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index 1dba359..2850ab9 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -2556,7 +2556,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
+ }
+ /* High speed charging function */
+-static struct device_attribute *hsc_handle;
++static device_attribute_no_const *hsc_handle;
+ static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+               struct device_attribute *attr,
+@@ -2630,7 +2630,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+ }
+ /* low battery function */
+-static struct device_attribute *lowbatt_handle;
++static device_attribute_no_const *lowbatt_handle;
+ static ssize_t sony_nc_lowbatt_store(struct device *dev,
+               struct device_attribute *attr,
+@@ -2696,7 +2696,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
+ }
+ /* fan speed function */
+-static struct device_attribute *fan_handle, *hsf_handle;
++static device_attribute_no_const *fan_handle, *hsf_handle;
+ static ssize_t sony_nc_hsfan_store(struct device *dev,
+               struct device_attribute *attr,
+@@ -2803,7 +2803,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
+ }
+ /* USB charge function */
+-static struct device_attribute *uc_handle;
++static device_attribute_no_const *uc_handle;
+ static ssize_t sony_nc_usb_charge_store(struct device *dev,
+               struct device_attribute *attr,
+@@ -2877,7 +2877,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
+ }
+ /* Panel ID function */
+-static struct device_attribute *panel_handle;
++static device_attribute_no_const *panel_handle;
+ static ssize_t sony_nc_panelid_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+@@ -2924,7 +2924,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
+ }
+ /* smart connect function */
+-static struct device_attribute *sc_handle;
++static device_attribute_no_const *sc_handle;
+ static ssize_t sony_nc_smart_conn_store(struct device *dev,
+               struct device_attribute *attr,
+@@ -4880,7 +4880,7 @@ static struct acpi_driver sony_pic_driver = {
+       .drv.pm = &sony_pic_pm,
+ };
+-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
++static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
+       {
+               .ident = "Sony Vaio",
+               .matches = {
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index b65ce75..d92001e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -2462,10 +2462,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+                               && !tp_features.bright_unkfw)
+                       TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+       }
++}
+ #undef TPACPI_COMPARE_KEY
+ #undef TPACPI_MAY_SEND_KEY
+-}
+ /*
+  * Polling driver
+@@ -4203,7 +4203,7 @@ static int bluetooth_get_status(void)
+                       TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+ }
+-static int bluetooth_set_status(enum tpacpi_rfkill_state state)
++static int bluetooth_set_status(const enum tpacpi_rfkill_state state)
+ {
+       int status;
+@@ -4391,7 +4391,7 @@ static int wan_get_status(void)
+                       TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+ }
+-static int wan_set_status(enum tpacpi_rfkill_state state)
++static int wan_set_status(const enum tpacpi_rfkill_state state)
+ {
+       int status;
+@@ -4577,7 +4577,7 @@ static int uwb_get_status(void)
+                       TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
+ }
+-static int uwb_set_status(enum tpacpi_rfkill_state state)
++static int uwb_set_status(const enum tpacpi_rfkill_state state)
+ {
+       int status;
+@@ -9526,7 +9526,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
+       },
+ };
+-static int __init set_ibm_param(const char *val, struct kernel_param *kp)
++static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
+ {
+       unsigned int i;
+       struct ibm_struct *ibm;
+diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
+index 3151fd1..12c5b20 100644
+--- a/drivers/pnp/base.h
++++ b/drivers/pnp/base.h
+@@ -163,7 +163,7 @@ struct pnp_resource *pnp_add_resource(struct pnp_dev *dev,
+                                     struct resource *res);
+ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
+                                         int flags);
+-struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
++struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, resource_size_t dma,
+                                         int flags);
+ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
+                                        resource_size_t start,
+diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
+index 438d4c7..ca8a2fb 100644
+--- a/drivers/pnp/pnpbios/bioscalls.c
++++ b/drivers/pnp/pnpbios/bioscalls.c
+@@ -59,7 +59,7 @@ do { \
+       set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
+ } while(0)
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+                       (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+ /*
+@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+       cpu = get_cpu();
+       save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
++
++      pax_open_kernel();
+       get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
++      pax_close_kernel();
+       /* On some boxes IRQ's during PnP BIOS calls are deadly.  */
+       spin_lock_irqsave(&pnp_bios_lock, flags);
+@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+                            :"memory");
+       spin_unlock_irqrestore(&pnp_bios_lock, flags);
++      pax_open_kernel();
+       get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
++      pax_close_kernel();
++
+       put_cpu();
+       /* If we get here and this is set then the PnP BIOS faulted on us. */
+@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
+       return status;
+ }
+-void pnpbios_calls_init(union pnp_bios_install_struct *header)
++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
+ {
+       int i;
+@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
+       pnp_bios_callpoint.offset = header->fields.pm16offset;
+       pnp_bios_callpoint.segment = PNP_CS16;
++      pax_open_kernel();
++
+       for_each_possible_cpu(i) {
+               struct desc_struct *gdt = get_cpu_gdt_table(i);
+               if (!gdt)
+@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
+               set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+                        (unsigned long)__va(header->fields.pm16dseg));
+       }
++
++      pax_close_kernel();
+ }
+diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
+index c38a5b9..6b3284c 100644
+--- a/drivers/pnp/pnpbios/core.c
++++ b/drivers/pnp/pnpbios/core.c
+@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
+       return 0;
+ }
+-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
++static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
+       {                       /* PnPBIOS GPF on boot */
+        .callback = exploding_pnp_bios,
+        .ident = "Higraded P14H",
+diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
+index f980ff7..77121c4 100644
+--- a/drivers/pnp/resource.c
++++ b/drivers/pnp/resource.c
+@@ -543,7 +543,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
+       return pnp_res;
+ }
+-struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
++struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, resource_size_t dma,
+                                         int flags)
+ {
+       struct pnp_resource *pnp_res;
+@@ -551,7 +551,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
+       pnp_res = pnp_new_resource(dev);
+       if (!pnp_res) {
+-              dev_err(&dev->dev, "can't add resource for DMA %d\n", dma);
++              dev_err(&dev->dev, "can't add resource for DMA %lld\n", dma);
+               return NULL;
+       }
+diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
+index dfe1ee8..67e820c 100644
+--- a/drivers/power/pda_power.c
++++ b/drivers/power/pda_power.c
+@@ -38,7 +38,11 @@ static struct power_supply *pda_psy_ac, *pda_psy_usb;
+ #if IS_ENABLED(CONFIG_USB_PHY)
+ static struct usb_phy *transceiver;
+-static struct notifier_block otg_nb;
++static int otg_handle_notification(struct notifier_block *nb,
++              unsigned long event, void *unused);
++static struct notifier_block otg_nb = {
++      .notifier_call = otg_handle_notification
++};
+ #endif
+ static struct regulator *ac_draw;
+@@ -373,7 +377,6 @@ static int pda_power_probe(struct platform_device *pdev)
+ #if IS_ENABLED(CONFIG_USB_PHY)
+       if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
+-              otg_nb.notifier_call = otg_handle_notification;
+               ret = usb_register_notifier(transceiver, &otg_nb);
+               if (ret) {
+                       dev_err(dev, "failure to register otg notifier\n");
+diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
+index cc439fd..8fa30df 100644
+--- a/drivers/power/power_supply.h
++++ b/drivers/power/power_supply.h
+@@ -16,12 +16,12 @@ struct power_supply;
+ #ifdef CONFIG_SYSFS
+-extern void power_supply_init_attrs(struct device_type *dev_type);
++extern void power_supply_init_attrs(void);
+ extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
+ #else
+-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
++static inline void power_supply_init_attrs(void) {}
+ #define power_supply_uevent NULL
+ #endif /* CONFIG_SYSFS */
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index a74d8ca..c98d745 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
+ ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
+ EXPORT_SYMBOL_GPL(power_supply_notifier);
+-static struct device_type power_supply_dev_type;
++extern const struct attribute_group *power_supply_attr_groups[];
++static struct device_type power_supply_dev_type = {
++      .groups = power_supply_attr_groups,
++};
+ #define POWER_SUPPLY_DEFERRED_REGISTER_TIME   msecs_to_jiffies(10)
+@@ -969,7 +972,7 @@ static int __init power_supply_class_init(void)
+               return PTR_ERR(power_supply_class);
+       power_supply_class->dev_uevent = power_supply_uevent;
+-      power_supply_init_attrs(&power_supply_dev_type);
++      power_supply_init_attrs();
+       return 0;
+ }
+diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
+index bcde8d1..0406331 100644
+--- a/drivers/power/power_supply_sysfs.c
++++ b/drivers/power/power_supply_sysfs.c
+@@ -239,17 +239,15 @@ static struct attribute_group power_supply_attr_group = {
+       .is_visible = power_supply_attr_is_visible,
+ };
+-static const struct attribute_group *power_supply_attr_groups[] = {
++const struct attribute_group *power_supply_attr_groups[] = {
+       &power_supply_attr_group,
+       NULL,
+ };
+-void power_supply_init_attrs(struct device_type *dev_type)
++void power_supply_init_attrs(void)
+ {
+       int i;
+-      dev_type->groups = power_supply_attr_groups;
+-
+       for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
+               __power_supply_attrs[i] = &power_supply_attrs[i].attr;
+ }
+diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
+index 1b5d450..b6042f8 100644
+--- a/drivers/power/reset/at91-reset.c
++++ b/drivers/power/reset/at91-reset.c
+@@ -17,6 +17,7 @@
+ #include <linux/of_address.h>
+ #include <linux/platform_device.h>
+ #include <linux/reboot.h>
++#include <asm/pgtable.h>
+ #include <soc/at91/at91sam9_ddrsdr.h>
+ #include <soc/at91/at91sam9_sdramc.h>
+@@ -206,7 +207,9 @@ static int __init at91_reset_probe(struct platform_device *pdev)
+       }
+       match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+-      at91_restart_nb.notifier_call = match->data;
++      pax_open_kernel();
++      const_cast(at91_restart_nb.notifier_call) = match->data;
++      pax_close_kernel();
+       sclk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(sclk))
+diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
+index 14bde0d..9391277 100644
+--- a/drivers/powercap/powercap_sys.c
++++ b/drivers/powercap/powercap_sys.c
+@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
+       struct device_attribute name_attr;
+ };
++static ssize_t show_constraint_name(struct device *dev,
++                              struct device_attribute *dev_attr,
++                              char *buf);
++
+ static struct powercap_constraint_attr
+-                              constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
++                              constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
++      [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
++              .power_limit_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IWUSR | S_IRUGO
++                      },
++                      .show   = show_constraint_power_limit_uw,
++                      .store  = store_constraint_power_limit_uw
++              },
++
++              .time_window_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IWUSR | S_IRUGO
++                      },
++                      .show   = show_constraint_time_window_us,
++                      .store  = store_constraint_time_window_us
++              },
++
++              .max_power_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IRUGO
++                      },
++                      .show   = show_constraint_max_power_uw,
++                      .store  = NULL
++              },
++
++              .min_power_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IRUGO
++                      },
++                      .show   = show_constraint_min_power_uw,
++                      .store  = NULL
++              },
++
++              .max_time_window_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IRUGO
++                      },
++                      .show   = show_constraint_max_time_window_us,
++                      .store  = NULL
++              },
++
++              .min_time_window_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IRUGO
++                      },
++                      .show   = show_constraint_min_time_window_us,
++                      .store  = NULL
++              },
++
++              .name_attr = {
++                      .attr = {
++                              .name   = NULL,
++                              .mode   = S_IRUGO
++                      },
++                      .show   = show_constraint_name,
++                      .store  = NULL
++              }
++      }
++};
+ /* A list of powercap control_types */
+ static LIST_HEAD(powercap_cntrl_list);
+@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
+ }
+ static int create_constraint_attribute(int id, const char *name,
+-                              int mode,
+-                              struct device_attribute *dev_attr,
+-                              ssize_t (*show)(struct device *,
+-                                      struct device_attribute *, char *),
+-                              ssize_t (*store)(struct device *,
+-                                      struct device_attribute *,
+-                              const char *, size_t)
+-                              )
++                              struct device_attribute *dev_attr)
+ {
++      name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
+-      dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+-                                                              id, name);
+-      if (!dev_attr->attr.name)
++      if (!name)
+               return -ENOMEM;
+-      dev_attr->attr.mode = mode;
+-      dev_attr->show = show;
+-      dev_attr->store = store;
++
++      pax_open_kernel();
++      const_cast(dev_attr->attr.name) = name;
++      pax_close_kernel();
+       return 0;
+ }
+@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
+       for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+               ret = create_constraint_attribute(i, "power_limit_uw",
+-                                      S_IWUSR | S_IRUGO,
+-                                      &constraint_attrs[i].power_limit_attr,
+-                                      show_constraint_power_limit_uw,
+-                                      store_constraint_power_limit_uw);
++                                      &constraint_attrs[i].power_limit_attr);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "time_window_us",
+-                                      S_IWUSR | S_IRUGO,
+-                                      &constraint_attrs[i].time_window_attr,
+-                                      show_constraint_time_window_us,
+-                                      store_constraint_time_window_us);
++                                      &constraint_attrs[i].time_window_attr);
+               if (ret)
+                       goto err_alloc;
+-              ret = create_constraint_attribute(i, "name", S_IRUGO,
+-                              &constraint_attrs[i].name_attr,
+-                              show_constraint_name,
+-                              NULL);
++              ret = create_constraint_attribute(i, "name",
++                              &constraint_attrs[i].name_attr);
+               if (ret)
+                       goto err_alloc;
+-              ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+-                              &constraint_attrs[i].max_power_attr,
+-                              show_constraint_max_power_uw,
+-                              NULL);
++              ret = create_constraint_attribute(i, "max_power_uw",
++                              &constraint_attrs[i].max_power_attr);
+               if (ret)
+                       goto err_alloc;
+-              ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+-                              &constraint_attrs[i].min_power_attr,
+-                              show_constraint_min_power_uw,
+-                              NULL);
++              ret = create_constraint_attribute(i, "min_power_uw",
++                              &constraint_attrs[i].min_power_attr);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "max_time_window_us",
+-                              S_IRUGO,
+-                              &constraint_attrs[i].max_time_window_attr,
+-                              show_constraint_max_time_window_us,
+-                              NULL);
++                              &constraint_attrs[i].max_time_window_attr);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "min_time_window_us",
+-                              S_IRUGO,
+-                              &constraint_attrs[i].min_time_window_attr,
+-                              show_constraint_min_time_window_us,
+-                              NULL);
++                              &constraint_attrs[i].min_time_window_attr);
+               if (ret)
+                       goto err_alloc;
+@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_max_energy_range_uj.attr;
+       if (power_zone->ops->get_energy_uj) {
++              pax_open_kernel();
+               if (power_zone->ops->reset_energy_uj)
+-                      dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
++                      const_cast(dev_attr_energy_uj.attr.mode) = S_IWUSR | S_IRUGO;
+               else
+-                      dev_attr_energy_uj.attr.mode = S_IRUGO;
++                      const_cast(dev_attr_energy_uj.attr.mode) = S_IRUGO;
++              pax_close_kernel();
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_energy_uj.attr;
+       }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 9c5d414..c7900ce 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -51,7 +51,7 @@ struct ptp_clock {
+       struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
+       wait_queue_head_t tsev_wq;
+       int defunct; /* tells readers to go away when clock is being removed */
+-      struct device_attribute *pin_dev_attr;
++      device_attribute_no_const *pin_dev_attr;
+       struct attribute **pin_attr;
+       struct attribute_group pin_attr_group;
+ };
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 302e626..12579af 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
+               goto no_pin_attr;
+       for (i = 0; i < n_pins; i++) {
+-              struct device_attribute *da = &ptp->pin_dev_attr[i];
++              device_attribute_no_const *da = &ptp->pin_dev_attr[i];
+               sysfs_attr_init(&da->attr);
+               da->attr.name = info->pin_config[i].name;
+               da->attr.mode = 0644;
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index db320e8..bbd864d 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3886,7 +3886,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+       const struct regulation_constraints *constraints = NULL;
+       const struct regulator_init_data *init_data;
+       struct regulator_config *config = NULL;
+-      static atomic_t regulator_no = ATOMIC_INIT(-1);
++      static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
+       struct regulator_dev *rdev;
+       struct device *dev;
+       int ret, i;
+@@ -3979,7 +3979,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+       rdev->dev.class = &regulator_class;
+       rdev->dev.parent = dev;
+       dev_set_name(&rdev->dev, "regulator.%lu",
+-                  (unsigned long) atomic_inc_return(&regulator_no));
++                  (unsigned long) atomic_inc_return_unchecked(&regulator_no));
+       /* set regulator constraints */
+       if (init_data)
+diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
+index b87f62d..34f1cdf 100644
+--- a/drivers/regulator/max8660.c
++++ b/drivers/regulator/max8660.c
+@@ -423,8 +423,10 @@ static int max8660_probe(struct i2c_client *client,
+               max8660->shadow_regs[MAX8660_OVER1] = 5;
+       } else {
+               /* Otherwise devices can be toggled via software */
+-              max8660_dcdc_ops.enable = max8660_dcdc_enable;
+-              max8660_dcdc_ops.disable = max8660_dcdc_disable;
++              pax_open_kernel();
++              const_cast(max8660_dcdc_ops.enable) = max8660_dcdc_enable;
++              const_cast(max8660_dcdc_ops.disable) = max8660_dcdc_disable;
++              pax_close_kernel();
+       }
+       /*
+diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
+index 3958f50..8139dc1 100644
+--- a/drivers/regulator/max8973-regulator.c
++++ b/drivers/regulator/max8973-regulator.c
+@@ -750,9 +750,11 @@ static int max8973_probe(struct i2c_client *client,
+               if (!pdata->enable_ext_control) {
+                       max->desc.enable_reg = MAX8973_VOUT;
+                       max->desc.enable_mask = MAX8973_VOUT_ENABLE;
+-                      max->ops.enable = regulator_enable_regmap;
+-                      max->ops.disable = regulator_disable_regmap;
+-                      max->ops.is_enabled = regulator_is_enabled_regmap;
++                      pax_open_kernel();
++                      const_cast(max->ops.enable) = regulator_enable_regmap;
++                      const_cast(max->ops.disable) = regulator_disable_regmap;
++                      const_cast(max->ops.is_enabled) = regulator_is_enabled_regmap;
++                      pax_close_kernel();
+                       break;
+               }
+@@ -780,9 +782,11 @@ static int max8973_probe(struct i2c_client *client,
+               max->desc.enable_reg = MAX8973_VOUT;
+               max->desc.enable_mask = MAX8973_VOUT_ENABLE;
+-              max->ops.enable = regulator_enable_regmap;
+-              max->ops.disable = regulator_disable_regmap;
+-              max->ops.is_enabled = regulator_is_enabled_regmap;
++              pax_open_kernel();
++              const_cast(max->ops.enable) = regulator_enable_regmap;
++              const_cast(max->ops.disable) = regulator_disable_regmap;
++              const_cast(max->ops.is_enabled) = regulator_is_enabled_regmap;
++              pax_close_kernel();
+               max->ops.set_current_limit = max8973_set_current_limit;
+               max->ops.get_current_limit = max8973_get_current_limit;
+               break;
+diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
+index 0d17c92..ce5897e 100644
+--- a/drivers/regulator/mc13892-regulator.c
++++ b/drivers/regulator/mc13892-regulator.c
+@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
+       mc13xxx_unlock(mc13892);
+       /* update mc13892_vcam ops */
+-      memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
++      pax_open_kernel();
++      memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
+                                               sizeof(struct regulator_ops));
+-      mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
+-      mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
++      const_cast(mc13892_vcam_ops.set_mode) = mc13892_vcam_set_mode,
++      const_cast(mc13892_vcam_ops.get_mode) = mc13892_vcam_get_mode,
++      pax_close_kernel();
+       mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
+       mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index fe0539e..247590f 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -329,9 +329,10 @@ void rproc_free_vring(struct rproc_vring *rvring)
+  *
+  * Returns 0 on success, or an appropriate error code otherwise
+  */
+-static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
++static int rproc_handle_vdev(struct rproc *rproc, void *_rsc,
+                                                       int offset, int avail)
+ {
++      struct fw_rsc_vdev *rsc = _rsc;
+       struct device *dev = &rproc->dev;
+       struct rproc_vdev *rvdev;
+       int i, ret;
+@@ -406,9 +407,10 @@ free_rvdev:
+  *
+  * Returns 0 on success, or an appropriate error code otherwise
+  */
+-static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
++static int rproc_handle_trace(struct rproc *rproc, void *_rsc,
+                                                       int offset, int avail)
+ {
++      struct fw_rsc_trace *rsc = _rsc;
+       struct rproc_mem_entry *trace;
+       struct device *dev = &rproc->dev;
+       void *ptr;
+@@ -486,9 +488,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
+  * and not allow firmwares to request access to physical addresses that
+  * are outside those ranges.
+  */
+-static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
++static int rproc_handle_devmem(struct rproc *rproc, void *_rsc,
+                                                       int offset, int avail)
+ {
++      struct fw_rsc_devmem *rsc = _rsc;
+       struct rproc_mem_entry *mapping;
+       struct device *dev = &rproc->dev;
+       int ret;
+@@ -558,10 +561,11 @@ out:
+  * pressure is important; it may have a substantial impact on performance.
+  */
+ static int rproc_handle_carveout(struct rproc *rproc,
+-                                              struct fw_rsc_carveout *rsc,
++                                              void *_rsc,
+                                               int offset, int avail)
+ {
++      struct fw_rsc_carveout *rsc = _rsc;
+       struct rproc_mem_entry *carveout, *mapping;
+       struct device *dev = &rproc->dev;
+       dma_addr_t dma;
+@@ -680,9 +684,11 @@ free_carv:
+       return ret;
+ }
+-static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc,
++static int rproc_count_vrings(struct rproc *rproc, void *_rsc,
+                             int offset, int avail)
+ {
++      struct fw_rsc_vdev *rsc = _rsc;
++
+       /* Summarize the number of notification IDs */
+       rproc->max_notifyid += rsc->num_of_vrings;
+@@ -694,18 +700,18 @@ static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+  * enum fw_resource_type.
+  */
+ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
+-      [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
+-      [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
+-      [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
++      [RSC_CARVEOUT] = rproc_handle_carveout,
++      [RSC_DEVMEM] = rproc_handle_devmem,
++      [RSC_TRACE] = rproc_handle_trace,
+       [RSC_VDEV] = NULL, /* VDEVs were handled upon registrarion */
+ };
+ static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
+-      [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
++      [RSC_VDEV] = rproc_handle_vdev,
+ };
+ static rproc_handle_resource_t rproc_count_vrings_handler[RSC_LAST] = {
+-      [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
++      [RSC_VDEV] = rproc_count_vrings,
+ };
+ /* handle firmware resource entries before booting the remote processor */
+diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
+index 9a3f2a6..c19b00a 100644
+--- a/drivers/rtc/rtc-armada38x.c
++++ b/drivers/rtc/rtc-armada38x.c
+@@ -18,6 +18,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/rtc.h>
++#include <asm/pgtable.h>
+ #define RTC_STATUS        0x0
+ #define RTC_STATUS_ALARM1         BIT(0)
+@@ -246,8 +247,10 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
+                * If there is no interrupt available then we can't
+                * use the alarm
+                */
+-              armada38x_rtc_ops.set_alarm = NULL;
+-              armada38x_rtc_ops.alarm_irq_enable = NULL;
++              pax_open_kernel();
++              const_cast(armada38x_rtc_ops.set_alarm) = NULL;
++              const_cast(armada38x_rtc_ops.alarm_irq_enable) = NULL;
++              pax_close_kernel();
+       }
+       platform_set_drvdata(pdev, rtc);
+       if (rtc->irq != -1)
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 43745ca..9eb24ff 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -732,7 +732,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+       hpet_rtc_timer_init();
+       /* export at least the first block of NVRAM */
+-      nvram.size = address_space - NVRAM_OFFSET;
++      pax_open_kernel();
++      const_cast(nvram.size) = address_space - NVRAM_OFFSET;
++      pax_close_kernel();
+       retval = sysfs_create_bin_file(&dev->kobj, &nvram);
+       if (retval < 0) {
+               dev_dbg(dev, "can't create nvram file? %d\n", retval);
+diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
+index a6d9434..dc26b71 100644
+--- a/drivers/rtc/rtc-dev.c
++++ b/drivers/rtc/rtc-dev.c
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include "rtc-core.h"
+ static dev_t rtc_devt;
+@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
+               if (copy_from_user(&tm, uarg, sizeof(tm)))
+                       return -EFAULT;
++              gr_log_timechange();
++
+               return rtc_set_time(rtc, &tm);
+       case RTC_PIE_ON:
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index 8e1c5cb..6fe95b9 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -111,7 +111,7 @@ struct ds1307 {
+       u8                      offset; /* register's offset */
+       u8                      regs[11];
+       u16                     nvram_offset;
+-      struct bin_attribute    *nvram;
++      bin_attribute_no_const  *nvram;
+       enum ds_type            type;
+       unsigned long           flags;
+ #define HAS_NVRAM     0               /* bit 0 == sysfs file active */
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index 58698d2..8560ebf 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -798,9 +798,11 @@ static int m41t80_probe(struct i2c_client *client,
+                       dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
+                       client->irq = 0;
+               } else {
+-                      m41t80_rtc_ops.read_alarm = m41t80_read_alarm;
+-                      m41t80_rtc_ops.set_alarm = m41t80_set_alarm;
+-                      m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable;
++                      pax_open_kernel();
++                      const_cast(m41t80_rtc_ops.read_alarm) = m41t80_read_alarm;
++                      const_cast(m41t80_rtc_ops.set_alarm) = m41t80_set_alarm;
++                      const_cast(m41t80_rtc_ops.alarm_irq_enable) = m41t80_alarm_irq_enable;
++                      pax_close_kernel();
+                       /* Enable the wakealarm */
+                       device_init_wakeup(&client->dev, true);
+               }
+diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
+index d99a705..99654e7 100644
+--- a/drivers/rtc/rtc-m48t59.c
++++ b/drivers/rtc/rtc-m48t59.c
+@@ -485,7 +485,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
+       if (IS_ERR(m48t59->rtc))
+               return PTR_ERR(m48t59->rtc);
+-      m48t59_nvram_attr.size = pdata->offset;
++      pax_open_kernel();
++      const_cast(m48t59_nvram_attr.size) = pdata->offset;
++      pax_close_kernel();
+       ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+       if (ret)
+diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
+index 1f9f7b4..6f87883 100644
+--- a/drivers/rtc/rtc-rv3029c2.c
++++ b/drivers/rtc/rtc-rv3029c2.c
+@@ -832,9 +832,11 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
+                       dev_warn(dev, "unable to request IRQ, alarms disabled\n");
+                       rv3029->irq = 0;
+               } else {
+-                      rv3029_rtc_ops.read_alarm = rv3029_read_alarm;
+-                      rv3029_rtc_ops.set_alarm = rv3029_set_alarm;
+-                      rv3029_rtc_ops.alarm_irq_enable = rv3029_alarm_irq_enable;
++                      pax_open_kernel();
++                      const_cast(rv3029_rtc_ops.read_alarm) = rv3029_read_alarm;
++                      const_cast(rv3029_rtc_ops.set_alarm) = rv3029_set_alarm;
++                      const_cast(rv3029_rtc_ops.alarm_irq_enable) = rv3029_alarm_irq_enable;
++                      pax_close_kernel();
+               }
+       }
+diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
+index 9a2f6a9..da6bfcb 100644
+--- a/drivers/rtc/rtc-rv8803.c
++++ b/drivers/rtc/rtc-rv8803.c
+@@ -497,6 +497,15 @@ static struct rtc_class_ops rv8803_rtc_ops = {
+       .ioctl = rv8803_ioctl,
+ };
++static struct rtc_class_ops rv8803_rtc_alarm_ops = {
++      .read_time = rv8803_get_time,
++      .set_time = rv8803_set_time,
++      .ioctl = rv8803_ioctl,
++      .read_alarm = rv8803_get_alarm,
++      .set_alarm = rv8803_set_alarm,
++      .alarm_irq_enable = rv8803_alarm_irq_enable,
++};
++
+ static int rv8803_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+ {
+@@ -540,15 +549,11 @@ static int rv8803_probe(struct i2c_client *client,
+               if (err) {
+                       dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
+                       client->irq = 0;
+-              } else {
+-                      rv8803_rtc_ops.read_alarm = rv8803_get_alarm;
+-                      rv8803_rtc_ops.set_alarm = rv8803_set_alarm;
+-                      rv8803_rtc_ops.alarm_irq_enable = rv8803_alarm_irq_enable;
+               }
+       }
+       rv8803->rtc = devm_rtc_device_register(&client->dev, client->name,
+-                                             &rv8803_rtc_ops, THIS_MODULE);
++                                             client->irq > 0 ? &rv8803_rtc_alarm_ops : &rv8803_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rv8803->rtc)) {
+               dev_err(&client->dev, "unable to register the class device\n");
+               return PTR_ERR(rv8803->rtc);
+diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
+index 7163b91..d7a2c31 100644
+--- a/drivers/rtc/rtc-rx8010.c
++++ b/drivers/rtc/rtc-rx8010.c
+@@ -483,9 +483,11 @@ static int rx8010_probe(struct i2c_client *client,
+                       dev_err(&client->dev, "unable to request IRQ\n");
+                       client->irq = 0;
+               } else {
+-                      rx8010_rtc_ops.read_alarm = rx8010_read_alarm;
+-                      rx8010_rtc_ops.set_alarm = rx8010_set_alarm;
+-                      rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable;
++                      pax_open_kernel();
++                      const_cast(rx8010_rtc_ops.read_alarm) = rx8010_read_alarm;
++                      const_cast(rx8010_rtc_ops.set_alarm) = rx8010_set_alarm;
++                      const_cast(rx8010_rtc_ops.alarm_irq_enable) = rx8010_alarm_irq_enable;
++                      pax_close_kernel();
+               }
+       }
+diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
+index 3a2da4c..1d1d4b1 100644
+--- a/drivers/rtc/rtc-test.c
++++ b/drivers/rtc/rtc-test.c
+@@ -112,8 +112,10 @@ static int test_probe(struct platform_device *plat_dev)
+       struct rtc_device *rtc;
+       if (test_mmss64) {
+-              test_rtc_ops.set_mmss64 = test_rtc_set_mmss64;
+-              test_rtc_ops.set_mmss = NULL;
++              pax_open_kernel();
++              const_cast(test_rtc_ops.set_mmss64) = test_rtc_set_mmss64;
++              const_cast(test_rtc_ops.set_mmss) = NULL;
++              pax_close_kernel();
+       }
+       rtc = devm_rtc_device_register(&plat_dev->dev, "test",
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index 6678d1f..0293b70 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -770,6 +770,11 @@ static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
+       return 0;
+ }
++static void aac_probe_container_scsi_done(struct scsi_cmnd * scsicmd)
++{
++      scsicmd->device = NULL;
++}
++
+ int aac_probe_container(struct aac_dev *dev, int cid)
+ {
+       struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
+@@ -782,7 +787,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
+               return -ENOMEM;
+       }
+       scsicmd->list.next = NULL;
+-      scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
++      scsicmd->scsi_done = aac_probe_container_scsi_done;
+       scsicmd->device = scsidev;
+       scsidev->sdev_state = 0;
+diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
+index d47b527..f2c4a89 100644
+--- a/drivers/scsi/aic7xxx/aic79xx.h
++++ b/drivers/scsi/aic7xxx/aic79xx.h
+@@ -1046,7 +1046,7 @@ typedef enum {
+ typedef uint8_t ahd_mode_state;
+-typedef void ahd_callback_t (void *);
++typedef void ahd_linux_callback_t (u_long);
+ struct ahd_completion
+ {
+diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
+index 109e2c9..7d3c9b5 100644
+--- a/drivers/scsi/aic7xxx/aic79xx_core.c
++++ b/drivers/scsi/aic7xxx/aic79xx_core.c
+@@ -207,7 +207,7 @@ static void                ahd_add_scb_to_free_list(struct ahd_softc *ahd,
+ static u_int          ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
+                                    u_int prev, u_int next, u_int tid);
+ static void           ahd_reset_current_bus(struct ahd_softc *ahd);
+-static ahd_callback_t ahd_stat_timer;
++static ahd_linux_callback_t   ahd_stat_timer;
+ #ifdef AHD_DUMP_SEQ
+ static void           ahd_dumpseq(struct ahd_softc *ahd);
+ #endif
+@@ -7041,10 +7041,9 @@ static const char *termstat_strings[] = {
+ /***************************** Timer Facilities *******************************/
+ #define ahd_timer_init init_timer
+ #define ahd_timer_stop del_timer_sync
+-typedef void ahd_linux_callback_t (u_long);
+ static void
+-ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
++ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_linux_callback_t *func, void *arg)
+ {
+       struct ahd_softc *ahd;
+@@ -7052,7 +7051,7 @@ ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+       del_timer(timer);
+       timer->data = (u_long)arg;
+       timer->expires = jiffies + (usec * HZ)/1000000;
+-      timer->function = (ahd_linux_callback_t*)func;
++      timer->function = func;
+       add_timer(timer);
+ }
+@@ -8878,9 +8877,9 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
+ /**************************** Statistics Processing ***************************/
+ static void
+-ahd_stat_timer(void *arg)
++ahd_stat_timer(unsigned long arg)
+ {
+-      struct  ahd_softc *ahd = arg;
++      struct  ahd_softc *ahd = (struct ahd_softc *)arg;
+       u_long  s;
+       int     enint_coal;
+       
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index f05e773..b48c418 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -5465,7 +5465,7 @@ beiscsi_hw_health_check(struct work_struct *work)
+ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+-              pci_channel_state_t state)
++              enum pci_channel_state state)
+ {
+       struct beiscsi_hba *phba = NULL;
+diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
+index 0e119d8..1bf8a49 100644
+--- a/drivers/scsi/bfa/bfa.h
++++ b/drivers/scsi/bfa/bfa.h
+@@ -225,8 +225,10 @@ struct bfa_faa_args_s {
+       bfa_boolean_t           busy;
+ };
++enum iocfc_event;
++
+ struct bfa_iocfc_s {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bfa_iocfc_s *, enum iocfc_event);
+       struct bfa_s            *bfa;
+       struct bfa_iocfc_cfg_s  cfg;
+       u32             req_cq_pi[BFI_IOC_MAX_CQS];
+diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
+index 7209afa..2450c125 100644
+--- a/drivers/scsi/bfa/bfa_core.c
++++ b/drivers/scsi/bfa/bfa_core.c
+@@ -1919,15 +1919,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
+       struct list_head                *qe;
+       struct list_head                *qen;
+       struct bfa_cb_qe_s      *hcb_qe;
+-      bfa_cb_cbfn_status_t    cbfn;
+       list_for_each_safe(qe, qen, comp_q) {
+               hcb_qe = (struct bfa_cb_qe_s *) qe;
+               if (hcb_qe->pre_rmv) {
+                       /* qe is invalid after return, dequeue before cbfn() */
+                       list_del(qe);
+-                      cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+-                      cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
++                      hcb_qe->cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+               } else
+                       hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+       }
+diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
+index df6760c..3b22f4d 100644
+--- a/drivers/scsi/bfa/bfa_cs.h
++++ b/drivers/scsi/bfa/bfa_cs.h
+@@ -184,8 +184,6 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
+  * @ BFA state machine interfaces
+  */
+-typedef void (*bfa_sm_t)(void *sm, int event);
+-
+ /*
+  * oc - object class eg. bfa_ioc
+  * st - state, eg. reset
+@@ -195,20 +193,75 @@ typedef void (*bfa_sm_t)(void *sm, int event);
+ #define bfa_sm_state_decl(oc, st, otype, etype)               \
+       static void oc ## _sm_ ## st(otype * fsm, etype event)
+-#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
++#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (_state))
+ #define bfa_sm_send_event(_sm, _event)        ((_sm)->sm((_sm), (_event)))
+ #define bfa_sm_get_state(_sm)         ((_sm)->sm)
+-#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
++#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (_state))
+ /*
+  * For converting from state machine function to state encoding.
+  */
+-struct bfa_sm_table_s {
+-      bfa_sm_t        sm;     /*  state machine function      */
++struct bfa_iocpf_s;
++enum iocpf_event;
++typedef void (*bfa_fsm_iocpf_t)(struct bfa_iocpf_s *, enum iocpf_event);
++
++struct iocpf_sm_table_s {
++      bfa_fsm_iocpf_t sm;     /*  state machine function      */
+       int             state;  /*  state machine encoding      */
+       char            *name;  /*  state name for display      */
+ };
+-#define BFA_SM(_sm)   ((bfa_sm_t)(_sm))
++
++struct bfa_ioc_s;
++enum ioc_event;
++typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc_s *, enum ioc_event);
++
++struct ioc_sm_table_s {
++      bfa_fsm_ioc_t sm;       /*  state machine function      */
++      int             state;  /*  state machine encoding      */
++      char            *name;  /*  state name for display      */
++};
++
++struct bfa_fcs_rport_s;
++enum rport_event;
++typedef void(*bfa_fcs_rport_t)(struct bfa_fcs_rport_s *, enum rport_event);
++
++struct rport_sm_table_s {
++      bfa_fcs_rport_t sm;     /*  state machine function      */
++      int             state;  /*  state machine encoding      */
++      char            *name;  /*  state name for display      */
++};
++
++struct bfa_fcs_vport_s;
++enum bfa_fcs_vport_event;
++typedef void(*bfa_fcs_vport_t)(struct bfa_fcs_vport_s *, enum bfa_fcs_vport_event);
++
++struct vport_sm_table_s {
++      bfa_fcs_vport_t sm;     /*  state machine function      */
++      int             state;  /*  state machine encoding      */
++      char            *name;  /*  state name for display      */
++};
++
++struct bfa_fcs_itnim_s;
++enum bfa_fcs_itnim_event;
++typedef void(*bfa_fcs_itnim_t)(struct bfa_fcs_itnim_s *, enum bfa_fcs_itnim_event);
++
++struct itnim_sm_table_s {
++      bfa_fcs_itnim_t sm;     /*  state machine function      */
++      int             state;  /*  state machine encoding      */
++      char            *name;  /*  state name for display      */
++};
++
++struct bfa_fcport_s;
++enum bfa_fcport_sm_event;
++typedef void(*bfa_fcport_t)(struct bfa_fcport_s *, enum bfa_fcport_sm_event);
++
++struct fcport_sm_table_s {
++      bfa_fcport_t sm;        /*  state machine function      */
++      int             state;  /*  state machine encoding      */
++      char            *name;  /*  state name for display      */
++};
++
++#define BFA_SM(_sm)   (_sm)
+ /*
+  * State machine with entry actions.
+@@ -226,17 +279,66 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
+       static void oc ## _sm_ ## st ## _entry(otype * fsm)
+ #define bfa_fsm_set_state(_fsm, _state) do {  \
+-      (_fsm)->fsm = (bfa_fsm_t)(_state);      \
++      (_fsm)->fsm = (_state);      \
+       _state ## _entry(_fsm);      \
+ } while (0)
+ #define bfa_fsm_send_event(_fsm, _event)      ((_fsm)->fsm((_fsm), (_event)))
+ #define bfa_fsm_get_state(_fsm)                       ((_fsm)->fsm)
+-#define bfa_fsm_cmp_state(_fsm, _state)               \
+-      ((_fsm)->fsm == (bfa_fsm_t)(_state))
++#define bfa_fsm_cmp_state(_fsm, _state)               ((_fsm)->fsm == (_state))
+ static inline int
+-bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
++iocpf_sm_to_state(struct iocpf_sm_table_s *smt, bfa_fsm_iocpf_t sm)
++{
++      int     i = 0;
++
++      while (smt[i].sm && smt[i].sm != sm)
++              i++;
++      return smt[i].state;
++}
++
++static inline int
++ioc_sm_to_state(struct ioc_sm_table_s *smt, bfa_fsm_ioc_t sm)
++{
++      int     i = 0;
++
++      while (smt[i].sm && smt[i].sm != sm)
++              i++;
++      return smt[i].state;
++}
++
++static inline int
++rport_sm_to_state(struct rport_sm_table_s *smt, bfa_fcs_rport_t sm)
++{
++      int     i = 0;
++
++      while (smt[i].sm && smt[i].sm != sm)
++              i++;
++      return smt[i].state;
++}
++
++static inline int
++vport_sm_to_state(struct vport_sm_table_s *smt, bfa_fcs_vport_t sm)
++{
++      int     i = 0;
++
++      while (smt[i].sm && smt[i].sm != sm)
++              i++;
++      return smt[i].state;
++}
++
++static inline int
++itnim_sm_to_state(struct itnim_sm_table_s *smt, bfa_fcs_itnim_t sm)
++{
++      int     i = 0;
++
++      while (smt[i].sm && smt[i].sm != sm)
++              i++;
++      return smt[i].state;
++}
++
++static inline int
++fcport_sm_to_state(struct fcport_sm_table_s *smt, bfa_fcport_t sm)
+ {
+       int     i = 0;
+diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
+index e93921d..ee6b4c0 100644
+--- a/drivers/scsi/bfa/bfa_fcpim.h
++++ b/drivers/scsi/bfa/bfa_fcpim.h
+@@ -37,7 +37,7 @@ struct bfa_iotag_s {
+ struct bfa_itn_s {
+       bfa_isr_func_t isr;
+-};
++} __no_const;
+ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+               void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+@@ -165,9 +165,11 @@ struct bfa_fcp_mod_s {
+ /*
+  * BFA IO (initiator mode)
+  */
++enum bfa_ioim_event;
++
+ struct bfa_ioim_s {
+       struct list_head        qe;             /*  queue elememt       */
+-      bfa_sm_t                sm;             /*  BFA ioim state machine */
++      void (*sm)(struct bfa_ioim_s *, enum bfa_ioim_event);/*  BFA ioim state machine */
+       struct bfa_s            *bfa;           /*  BFA module  */
+       struct bfa_fcpim_s      *fcpim;         /*  parent fcpim module */
+       struct bfa_itnim_s      *itnim;         /*  i-t-n nexus for this IO  */
+@@ -197,9 +199,11 @@ struct bfa_ioim_sp_s {
+ /*
+  * BFA Task management command (initiator mode)
+  */
++enum bfa_tskim_event;
++
+ struct bfa_tskim_s {
+       struct list_head        qe;
+-      bfa_sm_t                sm;
++      void (*sm)(struct bfa_tskim_s *, enum bfa_tskim_event);
+       struct bfa_s            *bfa;   /*  BFA module  */
+       struct bfa_fcpim_s      *fcpim; /*  parent fcpim module */
+       struct bfa_itnim_s      *itnim; /*  i-t-n nexus for this IO  */
+@@ -219,9 +223,11 @@ struct bfa_tskim_s {
+ /*
+  * BFA i-t-n (initiator mode)
+  */
++enum bfa_itnim_event;
++
+ struct bfa_itnim_s {
+       struct list_head        qe;     /*  queue element       */
+-      bfa_sm_t                sm;     /*  i-t-n im BFA state machine  */
++      void (*sm)(struct bfa_itnim_s *, enum bfa_itnim_event);/*  i-t-n im BFA state machine  */
+       struct bfa_s            *bfa;   /*  bfa instance        */
+       struct bfa_rport_s      *rport; /*  bfa rport   */
+       void                    *ditn;  /*  driver i-t-n structure      */
+diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
+index 1e7e139..c2031dd 100644
+--- a/drivers/scsi/bfa/bfa_fcs.c
++++ b/drivers/scsi/bfa/bfa_fcs.c
+@@ -39,10 +39,21 @@ struct bfa_fcs_mod_s {
+ #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
+ static struct bfa_fcs_mod_s fcs_modules[] = {
+-      { bfa_fcs_port_attach, NULL, NULL },
+-      { bfa_fcs_uf_attach, NULL, NULL },
+-      { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
+-        bfa_fcs_fabric_modexit },
++      {
++              .attach = bfa_fcs_port_attach,
++              .modinit = NULL,
++              .modexit = NULL
++      },
++      {
++              .attach = bfa_fcs_uf_attach,
++              .modinit = NULL,
++              .modexit = NULL
++      },
++      {
++              .attach = bfa_fcs_fabric_attach,
++              .modinit = bfa_fcs_fabric_modinit,
++              .modexit = bfa_fcs_fabric_modexit
++      },
+ };
+ /*
+diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
+index 0f797a5..73b170a 100644
+--- a/drivers/scsi/bfa/bfa_fcs.h
++++ b/drivers/scsi/bfa/bfa_fcs.h
+@@ -67,8 +67,10 @@ struct bfa_fcs_s;
+ #define BFA_FCS_PID_IS_WKA(pid)  ((bfa_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
+ #define BFA_FCS_MAX_RPORT_LOGINS 1024
++enum vport_ns_event;
++
+ struct bfa_fcs_lport_ns_s {
+-      bfa_sm_t        sm;             /*  state machine */
++      void (*sm)(struct bfa_fcs_lport_ns_s *, enum vport_ns_event);/*  state machine */
+       struct bfa_timer_s timer;
+       struct bfa_fcs_lport_s *port;   /*  parent port */
+       struct bfa_fcxp_s *fcxp;
+@@ -77,18 +79,20 @@ struct bfa_fcs_lport_ns_s {
+       u8      num_rsnn_nn_retries;
+ };
++enum port_scn_event;
+ struct bfa_fcs_lport_scn_s {
+-      bfa_sm_t        sm;             /*  state machine */
++      void (*sm)(struct bfa_fcs_lport_scn_s *, enum port_scn_event);/*  state machine */
+       struct bfa_timer_s timer;
+       struct bfa_fcs_lport_s *port;   /*  parent port */
+       struct bfa_fcxp_s *fcxp;
+       struct bfa_fcxp_wqe_s fcxp_wqe;
+ };
++enum port_fdmi_event;
+ struct bfa_fcs_lport_fdmi_s {
+-      bfa_sm_t        sm;             /*  state machine */
++      void (*sm)(struct bfa_fcs_lport_fdmi_s *, enum port_fdmi_event);/*  state machine */
+       struct bfa_timer_s timer;
+       struct bfa_fcs_lport_ms_s *ms;  /*  parent ms */
+       struct bfa_fcxp_s *fcxp;
+@@ -97,9 +101,10 @@ struct bfa_fcs_lport_fdmi_s {
+       u8      rsvd[3];
+ };
++enum port_ms_event;
+ struct bfa_fcs_lport_ms_s {
+-      bfa_sm_t        sm;             /*  state machine */
++      void (*sm)(struct bfa_fcs_lport_ms_s *, enum port_ms_event);/*  state machine */
+       struct bfa_timer_s timer;
+       struct bfa_fcs_lport_s *port;   /*  parent port */
+       struct bfa_fcxp_s *fcxp;
+@@ -139,10 +144,11 @@ union bfa_fcs_lport_topo_u {
+       struct bfa_fcs_lport_n2n_s pn2n;
+ };
++enum bfa_fcs_lport_event;
+ struct bfa_fcs_lport_s {
+       struct list_head         qe;    /*  used by port/vport */
+-      bfa_sm_t               sm;      /*  state machine */
++      void (*sm)(struct bfa_fcs_lport_s *, enum bfa_fcs_lport_event); /*  state machine */
+       struct bfa_fcs_fabric_s *fabric;        /*  parent fabric */
+       struct bfa_lport_cfg_s  port_cfg;       /*  port configuration */
+       struct bfa_timer_s link_timer;  /*  timer for link offline */
+@@ -179,10 +185,11 @@ enum bfa_fcs_fabric_type {
+       BFA_FCS_FABRIC_LOOP = 3,
+ };
++enum bfa_fcs_fabric_event;
+ struct bfa_fcs_fabric_s {
+       struct list_head   qe;          /*  queue element */
+-      bfa_sm_t         sm;            /*  state machine */
++      void (*sm)(struct bfa_fcs_fabric_s *, enum bfa_fcs_fabric_event); /*  state machine */
+       struct bfa_fcs_s *fcs;          /*  FCS instance */
+       struct bfa_fcs_lport_s  bport;  /*  base logical port */
+       enum bfa_fcs_fabric_type fab_type; /*  fabric type */
+@@ -355,9 +362,11 @@ void            bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
+                                             struct fchs_s *rx_frame, u32 len);
+ void          bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
++enum bfa_fcs_vport_event;
++
+ struct bfa_fcs_vport_s {
+       struct list_head                qe;             /*  queue elem  */
+-      bfa_sm_t                sm;             /*  state machine       */
++      void (*sm)(struct bfa_fcs_vport_s *, enum bfa_fcs_vport_event);/*  state machine        */
+       bfa_fcs_lport_t         lport;          /*  logical port        */
+       struct bfa_timer_s      timer;
+       struct bfad_vport_s     *vport_drv;     /*  Driver private      */
+@@ -409,8 +418,10 @@ struct bfa_fcs_tin_s;
+ struct bfa_fcs_iprp_s;
+ /* Rport Features (RPF) */
++enum rpf_event;
++
+ struct bfa_fcs_rpf_s {
+-      bfa_sm_t        sm;     /*  state machine */
++      void (*sm)(struct bfa_fcs_rpf_s *, enum rpf_event); /*  state machine */
+       struct bfa_fcs_rport_s *rport;  /*  parent rport */
+       struct bfa_timer_s      timer;  /*  general purpose timer */
+       struct bfa_fcxp_s       *fcxp;  /*  FCXP needed for discarding */
+@@ -425,6 +436,8 @@ struct bfa_fcs_rpf_s {
+        */
+ };
++enum rport_event;
++
+ struct bfa_fcs_rport_s {
+       struct list_head        qe;     /*  used by port/vport */
+       struct bfa_fcs_lport_s *port;   /*  parent FCS port */
+@@ -441,7 +454,7 @@ struct bfa_fcs_rport_s {
+       wwn_t   pwwn;   /*  port wwn of rport */
+       wwn_t   nwwn;   /*  node wwn of rport */
+       struct bfa_rport_symname_s psym_name; /*  port symbolic name  */
+-      bfa_sm_t        sm;             /*  state machine */
++      void (*sm)(struct bfa_fcs_rport_s *, enum rport_event); /*  state machine */
+       struct bfa_timer_s timer;       /*  general purpose timer */
+       struct bfa_fcs_itnim_s *itnim;  /*  ITN initiator mode role */
+       struct bfa_fcs_tin_s *tin;      /*  ITN initiator mode role */
+@@ -502,9 +515,10 @@ void  bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
+  * forward declarations
+  */
+ struct bfad_itnim_s;
++enum bfa_fcs_itnim_event;
+ struct bfa_fcs_itnim_s {
+-      bfa_sm_t                sm;             /*  state machine */
++      void (*sm)(struct bfa_fcs_itnim_s *, enum bfa_fcs_itnim_event);/*  state machine */
+       struct bfa_fcs_rport_s  *rport;         /*  parent remote rport  */
+       struct bfad_itnim_s     *itnim_drv;     /*  driver peer instance */
+       struct bfa_fcs_s        *fcs;           /*  fcs instance        */
+diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
+index 2e3b19e..7a9b729 100644
+--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
++++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
+@@ -60,7 +60,7 @@ static void  bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
+ static void   bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
+                                          enum bfa_fcs_itnim_event event);
+-static struct bfa_sm_table_s itnim_sm_table[] = {
++static struct itnim_sm_table_s itnim_sm_table[] = {
+       {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
+       {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
+       {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
+@@ -673,7 +673,7 @@ bfa_status_t
+ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
+ {
+       bfa_trc(itnim->fcs, itnim->rport->pid);
+-      switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
++      switch (itnim_sm_to_state(itnim_sm_table, itnim->sm)) {
+       case BFA_ITNIM_ONLINE:
+       case BFA_ITNIM_INITIATIOR:
+               return BFA_STATUS_OK;
+@@ -773,7 +773,7 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+       if (itnim == NULL)
+               return BFA_STATUS_NO_FCPIM_NEXUS;
+-      attr->state         = bfa_sm_to_state(itnim_sm_table, itnim->sm);
++      attr->state         = itnim_sm_to_state(itnim_sm_table, itnim->sm);
+       attr->retry         = itnim->seq_rec;
+       attr->rec_support   = itnim->rec_support;
+       attr->conf_comp     = itnim->conf_comp;
+diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
+index 7733ad5..11f32d1 100644
+--- a/drivers/scsi/bfa/bfa_fcs_lport.c
++++ b/drivers/scsi/bfa/bfa_fcs_lport.c
+@@ -90,15 +90,26 @@ static struct {
+       void            (*offline) (struct bfa_fcs_lport_s *port);
+ } __port_action[] = {
+       {
+-      bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
+-                      bfa_fcs_lport_unknown_offline}, {
+-      bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
+-                      bfa_fcs_lport_fab_offline}, {
+-      bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
+-                      bfa_fcs_lport_n2n_offline}, {
+-      bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
+-                      bfa_fcs_lport_loop_offline},
+-      };
++              .init = bfa_fcs_lport_unknown_init,
++              .online = bfa_fcs_lport_unknown_online,
++              .offline = bfa_fcs_lport_unknown_offline
++      },
++      {
++              .init = bfa_fcs_lport_fab_init,
++              .online = bfa_fcs_lport_fab_online,
++              .offline = bfa_fcs_lport_fab_offline
++      },
++      {
++              .init = bfa_fcs_lport_n2n_init,
++              .online = bfa_fcs_lport_n2n_online,
++              .offline = bfa_fcs_lport_n2n_offline
++      },
++      {
++              .init = bfa_fcs_lport_loop_init,
++              .online = bfa_fcs_lport_loop_online,
++              .offline = bfa_fcs_lport_loop_offline
++      },
++};
+ /*
+  *  fcs_port_sm FCS logical port state machine
+@@ -6040,7 +6051,7 @@ static void      bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+ static void   bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+                                       enum bfa_fcs_vport_event event);
+-static struct bfa_sm_table_s  vport_sm_table[] = {
++static struct vport_sm_table_s vport_sm_table[] = {
+       {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
+       {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
+       {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
+@@ -6871,7 +6882,7 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
+       memset(attr, 0, sizeof(struct bfa_vport_attr_s));
+       bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
+-      attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
++      attr->vport_state = vport_sm_to_state(vport_sm_table, vport->sm);
+ }
+diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
+index de50349..6d676be 100644
+--- a/drivers/scsi/bfa/bfa_fcs_rport.c
++++ b/drivers/scsi/bfa/bfa_fcs_rport.c
+@@ -144,7 +144,7 @@ static void        bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+ static void   bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+                                               enum rport_event event);
+-static struct bfa_sm_table_s rport_sm_table[] = {
++static struct rport_sm_table_s rport_sm_table[] = {
+       {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
+       {BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
+       {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
+@@ -2980,7 +2980,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
+ int
+ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
+ {
+-      return bfa_sm_to_state(rport_sm_table, rport->sm);
++      return rport_sm_to_state(rport_sm_table, rport->sm);
+ }
+diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
+index a1ada4a..6ed9ba2 100644
+--- a/drivers/scsi/bfa/bfa_ioc.c
++++ b/drivers/scsi/bfa/bfa_ioc.c
+@@ -148,7 +148,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
+-static struct bfa_sm_table_s ioc_sm_table[] = {
++static struct ioc_sm_table_s ioc_sm_table[] = {
+       {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
+       {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+       {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
+@@ -236,7 +236,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
+                                               enum iocpf_event);
+ bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
+-static struct bfa_sm_table_s iocpf_sm_table[] = {
++static struct iocpf_sm_table_s iocpf_sm_table[] = {
+       {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+       {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+       {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+@@ -2830,12 +2830,12 @@ enum bfa_ioc_state
+ bfa_ioc_get_state(struct bfa_ioc_s *ioc)
+ {
+       enum bfa_iocpf_state iocpf_st;
+-      enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
++      enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm);
+       if (ioc_st == BFA_IOC_ENABLING ||
+               ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+-              iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
++              iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+               switch (iocpf_st) {
+               case BFA_IOCPF_SEMWAIT:
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 713745d..78b9671 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -259,7 +259,7 @@ struct bfa_ioc_cbfn_s {
+       bfa_ioc_disable_cbfn_t  disable_cbfn;
+       bfa_ioc_hbfail_cbfn_t   hbfail_cbfn;
+       bfa_ioc_reset_cbfn_t    reset_cbfn;
+-};
++} __no_const;
+ /*
+  * IOC event notification mechanism.
+@@ -286,16 +286,20 @@ struct bfa_ioc_notify_s {
+       (__notify)->cbarg = (__cbarg);      \
+ } while (0)
++enum iocpf_event;
++
+ struct bfa_iocpf_s {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bfa_iocpf_s *, enum iocpf_event);
+       struct bfa_ioc_s        *ioc;
+       bfa_boolean_t           fw_mismatch_notified;
+       bfa_boolean_t           auto_recover;
+       u32                     poll_time;
+ };
++enum ioc_event;
++
+ struct bfa_ioc_s {
+-      bfa_fsm_t               fsm;
++      void (*fsm)(struct bfa_ioc_s *, enum ioc_event);
+       struct bfa_s            *bfa;
+       struct bfa_pcidev_s     pcidev;
+       struct bfa_timer_mod_s  *timer_mod;
+@@ -353,7 +357,7 @@ struct bfa_ioc_hwif_s {
+       void            (*ioc_set_alt_fwstate)  (struct bfa_ioc_s *ioc,
+                                       enum bfi_ioc_state fwstate);
+       enum bfi_ioc_state      (*ioc_get_alt_fwstate)  (struct bfa_ioc_s *ioc);
+-};
++} __no_const;
+ /*
+  * Queue element to wait for room in request queue. FIFO order is
+@@ -779,8 +783,10 @@ struct bfa_dconf_s {
+ };
+ #pragma pack()
++enum bfa_dconf_event;
++
+ struct bfa_dconf_mod_s {
+-      bfa_sm_t                sm;
++      void (*sm)(struct bfa_dconf_mod_s *, enum bfa_dconf_event);
+       u8                      instance;
+       bfa_boolean_t           read_data_valid;
+       bfa_boolean_t           min_cfg;
+diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
+index 53135f2..640621b 100644
+--- a/drivers/scsi/bfa/bfa_modules.h
++++ b/drivers/scsi/bfa/bfa_modules.h
+@@ -79,12 +79,12 @@ enum {
+                                                                       \
+       extern struct bfa_module_s hal_mod_ ## __mod;                   \
+       struct bfa_module_s hal_mod_ ## __mod = {                       \
+-              bfa_ ## __mod ## _meminfo,                              \
+-              bfa_ ## __mod ## _attach,                               \
+-              bfa_ ## __mod ## _detach,                               \
+-              bfa_ ## __mod ## _start,                                \
+-              bfa_ ## __mod ## _stop,                                 \
+-              bfa_ ## __mod ## _iocdisable,                           \
++              .meminfo = bfa_ ## __mod ## _meminfo,                   \
++              .attach = bfa_ ## __mod ## _attach,                     \
++              .detach = bfa_ ## __mod ## _detach,                     \
++              .start = bfa_ ## __mod ## _start,                       \
++              .stop = bfa_ ## __mod ## _stop,                         \
++              .iocdisable = bfa_ ## __mod ## _iocdisable,             \
+       }
+ #define BFA_CACHELINE_SZ      (256)
+diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
+index 12de292..ec9f0ab 100644
+--- a/drivers/scsi/bfa/bfa_svc.c
++++ b/drivers/scsi/bfa/bfa_svc.c
+@@ -225,7 +225,7 @@ static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+ static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+                                       enum bfa_fcport_ln_sm_event event);
+-static struct bfa_sm_table_s hal_port_sm_table[] = {
++static struct fcport_sm_table_s hal_port_sm_table[] = {
+       {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
+       {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
+       {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
+@@ -3642,7 +3642,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+       fcport->event_arg.i2hmsg = i2hmsg;
+       bfa_trc(bfa, msg->mhdr.msg_id);
+-      bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
++      bfa_trc(bfa, fcport_sm_to_state(hal_port_sm_table, fcport->sm));
+       switch (msg->mhdr.msg_id) {
+       case BFI_FCPORT_I2H_ENABLE_RSP:
+@@ -4077,7 +4077,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
+       attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
+       attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
+-      attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
++      attr->port_state = fcport_sm_to_state(hal_port_sm_table, fcport->sm);
+       attr->fec_state = fcport->fec_state;
+@@ -4159,7 +4159,7 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
+ {
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+-      return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
++      return fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
+               BFA_PORT_ST_DISABLED;
+ }
+@@ -4169,7 +4169,7 @@ bfa_fcport_is_dport(struct bfa_s *bfa)
+ {
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+-      return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
++      return (fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
+               BFA_PORT_ST_DPORT);
+ }
+@@ -4178,7 +4178,7 @@ bfa_fcport_is_ddport(struct bfa_s *bfa)
+ {
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+-      return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
++      return (fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
+               BFA_PORT_ST_DDPORT);
+ }
+diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
+index ea2278b..6f51a73 100644
+--- a/drivers/scsi/bfa/bfa_svc.h
++++ b/drivers/scsi/bfa/bfa_svc.h
+@@ -160,6 +160,8 @@ struct bfa_fcxp_rsp_info_s {
+       u32     rsp_maxlen;     /*  max response length expected */
+ };
++typedef void (*bfa_sm_t)(void *sm, int event);
++
+ struct bfa_fcxp_s {
+       struct list_head        qe;             /*  fcxp queue element */
+       bfa_sm_t        sm;             /*  state machine */
+@@ -295,9 +297,11 @@ struct bfa_rport_info_s {
+ /*
+  * BFA rport data structure
+  */
++enum bfa_rport_event;
++
+ struct bfa_rport_s {
+       struct list_head        qe;     /*  queue element                   */
+-      bfa_sm_t        sm;             /*  state machine                   */
++      void (*sm)(struct bfa_rport_s *, enum bfa_rport_event);/*  state machine                    */
+       struct bfa_s    *bfa;           /*  backpointer to BFA              */
+       void            *rport_drv;     /*  fcs/driver rport object         */
+       u16     fw_handle;      /*  firmware rport handle           */
+@@ -388,10 +392,12 @@ void     bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
+ /*
+  * LPS - bfa lport login/logout service interface
+  */
++enum bfa_lps_event;
++
+ struct bfa_lps_s {
+       struct list_head        qe;     /*  queue element               */
+       struct bfa_s    *bfa;           /*  parent bfa instance */
+-      bfa_sm_t        sm;             /*  finite state machine        */
++      void (*sm)(struct bfa_lps_s *, enum bfa_lps_event);/*  finite state machine     */
+       u8              bfa_tag;        /*  lport tag           */
+       u8              fw_tag;         /*  lport fw tag                */
+       u8              reqq;           /*  lport request queue */
+@@ -450,9 +456,11 @@ void      bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+ /*
+  * Link notification data structure
+  */
++enum bfa_fcport_ln_sm_event;
++
+ struct bfa_fcport_ln_s {
+       struct bfa_fcport_s     *fcport;
+-      bfa_sm_t                sm;
++      void (*sm)(struct bfa_fcport_ln_s *, enum bfa_fcport_ln_sm_event);
+       struct bfa_cb_qe_s      ln_qe;  /*  BFA callback queue elem for ln */
+       enum bfa_port_linkstate ln_event; /*  ln event for callback */
+ };
+@@ -466,7 +474,7 @@ struct bfa_fcport_trunk_s {
+  */
+ struct bfa_fcport_s {
+       struct bfa_s            *bfa;   /*  parent BFA instance */
+-      bfa_sm_t                sm;     /*  port state machine */
++      void (*sm)(struct bfa_fcport_s *, enum bfa_fcport_sm_event);    /*  port state machine */
+       wwn_t                   nwwn;   /*  node wwn of physical port */
+       wwn_t                   pwwn;   /*  port wwn of physical oprt */
+       enum bfa_port_speed speed_sup;
+@@ -714,9 +722,11 @@ struct bfa_fcdiag_lb_s {
+       u32        status;
+ };
++enum bfa_dport_sm_event;
++
+ struct bfa_dport_s {
+       struct bfa_s    *bfa;           /* Back pointer to BFA  */
+-      bfa_sm_t        sm;             /* finite state machine */
++      void (*sm)(struct bfa_dport_s *, enum bfa_dport_sm_event);/* finite state machine */
+       struct bfa_reqq_wait_s reqq_wait;
+       bfa_cb_diag_t   cbfn;
+       void            *cbarg;
+diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
+index 9d253cb..bb533ea 100644
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -408,6 +408,16 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
+       complete(&fcomp->comp);
+ }
++void
++bfad_stats_comp(void *arg, bfa_boolean_t _status)
++{
++      struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
++      bfa_status_t status = (bfa_status_t)_status;
++
++      fcomp->status = status;
++      complete(&fcomp->comp);
++}
++
+ /*
+  * bfa_init callback
+  */
+@@ -1442,7 +1452,7 @@ bfad_pci_remove(struct pci_dev *pdev)
+  * PCI Error Recovery entry, error detected.
+  */
+ static pci_ers_result_t
+-bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++bfad_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct bfad_s *bfad = pci_get_drvdata(pdev);
+       unsigned long   flags;
+diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
+index d1ad020..661c0f9 100644
+--- a/drivers/scsi/bfa/bfad_bsg.c
++++ b/drivers/scsi/bfa/bfad_bsg.c
+@@ -2145,7 +2145,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+       struct bfa_cb_pending_q_s cb_qe;
+       init_completion(&fcomp.comp);
+-      bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
++      bfa_pending_q_init(&cb_qe, bfad_stats_comp,
+                          &fcomp, &iocmd->stats);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+@@ -2169,7 +2169,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+       struct bfa_cb_pending_q_s cb_qe;
+       init_completion(&fcomp.comp);
+-      bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
++      bfa_pending_q_init(&cb_qe, bfad_stats_comp, &fcomp, NULL);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+@@ -2453,7 +2453,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       init_completion(&fcomp.comp);
+-      bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
++      bfa_pending_q_init(&cb_qe, bfad_stats_comp,
+                          &fcomp, &iocmd->stats);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+@@ -2484,7 +2484,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+       init_completion(&fcomp.comp);
+-      bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
++      bfa_pending_q_init(&cb_qe, bfad_stats_comp,
+                          &fcomp, NULL);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
+index f9e8620..807a983 100644
+--- a/drivers/scsi/bfa/bfad_drv.h
++++ b/drivers/scsi/bfa/bfad_drv.h
+@@ -187,8 +187,10 @@ union bfad_tmp_buf {
+ /*
+  * BFAD (PCI function) data structure
+  */
++enum bfad_sm_event;
++
+ struct bfad_s {
+-      bfa_sm_t        sm;     /* state machine */
++      void (*sm)(struct bfad_s *, enum bfad_sm_event); /* state machine */
+       struct list_head list_entry;
+       struct bfa_s    bfa;
+       struct bfa_fcs_s bfa_fcs;
+@@ -309,6 +311,7 @@ void               bfad_fcs_stop(struct bfad_s *bfad);
+ void          bfad_remove_intr(struct bfad_s *bfad);
+ void          bfad_hal_mem_release(struct bfad_s *bfad);
+ void          bfad_hcb_comp(void *arg, bfa_status_t status);
++void          bfad_stats_comp(void *arg, bfa_boolean_t _status);
+ int           bfad_setup_intr(struct bfad_s *bfad);
+ void          bfad_remove_intr(struct bfad_s *bfad);
+diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
+index c38017b..3268e62 100644
+--- a/drivers/scsi/csiostor/csio_defs.h
++++ b/drivers/scsi/csiostor/csio_defs.h
+@@ -73,7 +73,8 @@ csio_list_deleted(struct list_head *list)
+ #define csio_list_prev(elem)  (((struct list_head *)(elem))->prev)
+ /* State machine */
+-typedef void (*csio_sm_state_t)(void *, uint32_t);
++struct csio_sm;
++typedef void (*csio_sm_state_t)(struct csio_sm *, uint32_t);
+ struct csio_sm {
+       struct list_head        sm_list;
+@@ -81,9 +82,9 @@ struct csio_sm {
+ };
+ static inline void
+-csio_set_state(void *smp, void *state)
++csio_set_state(struct csio_sm *smp, csio_sm_state_t state)
+ {
+-      ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
++      smp->sm_state = state;
+ }
+ static inline void
+@@ -93,21 +94,21 @@ csio_init_state(struct csio_sm *smp, void *state)
+ }
+ static inline void
+-csio_post_event(void *smp, uint32_t evt)
++csio_post_event(struct csio_sm *smp, uint32_t evt)
+ {
+-      ((struct csio_sm *)smp)->sm_state(smp, evt);
++      smp->sm_state(smp, evt);
+ }
+ static inline csio_sm_state_t
+-csio_get_state(void *smp)
++csio_get_state(struct csio_sm *smp)
+ {
+-      return ((struct csio_sm *)smp)->sm_state;
++      return smp->sm_state;
+ }
+ static inline bool
+-csio_match_state(void *smp, void *state)
++csio_match_state(struct csio_sm *smp, csio_sm_state_t state)
+ {
+-      return (csio_get_state(smp) == (csio_sm_state_t)state);
++      return (csio_get_state(smp) == state);
+ }
+ #define       CSIO_ASSERT(cond)               BUG_ON(!(cond))
+diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
+index 622bdab..1a31d41 100644
+--- a/drivers/scsi/csiostor/csio_hw.c
++++ b/drivers/scsi/csiostor/csio_hw.c
+@@ -89,15 +89,15 @@ static void csio_mgmtm_cleanup(struct csio_mgmtm *);
+ static void csio_hw_mbm_cleanup(struct csio_hw *);
+ /* State machine forward declarations */
+-static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
+-static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
++static void csio_hws_uninit(struct csio_sm *, uint32_t);
++static void csio_hws_configuring(struct csio_sm *, uint32_t);
++static void csio_hws_initializing(struct csio_sm *, uint32_t);
++static void csio_hws_ready(struct csio_sm *, uint32_t);
++static void csio_hws_quiescing(struct csio_sm *, uint32_t);
++static void csio_hws_quiesced(struct csio_sm *, uint32_t);
++static void csio_hws_resetting(struct csio_sm *, uint32_t);
++static void csio_hws_removing(struct csio_sm *, uint32_t);
++static void csio_hws_pcierr(struct csio_sm *, uint32_t);
+ static void csio_hw_initialize(struct csio_hw *hw);
+ static void csio_evtq_stop(struct csio_hw *hw);
+@@ -105,12 +105,12 @@ static void csio_evtq_start(struct csio_hw *hw);
+ int csio_is_hw_ready(struct csio_hw *hw)
+ {
+-      return csio_match_state(hw, csio_hws_ready);
++      return csio_match_state(&hw->sm, csio_hws_ready);
+ }
+ int csio_is_hw_removing(struct csio_hw *hw)
+ {
+-      return csio_match_state(hw, csio_hws_removing);
++      return csio_match_state(&hw->sm, csio_hws_removing);
+ }
+@@ -2326,8 +2326,11 @@ csio_hw_fatal_err(struct csio_hw *hw)
+  *
+  */
+ static void
+-csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_uninit(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2351,8 +2354,11 @@ csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_configuring(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2389,8 +2395,11 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_initializing(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2427,8 +2436,11 @@ csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_ready(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       /* Remember the event */
+       hw->evtflag = evt;
+@@ -2476,8 +2488,11 @@ csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_quiescing(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2536,8 +2551,11 @@ csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_quiesced(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2561,8 +2579,11 @@ csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_resetting(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2587,8 +2608,11 @@ csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_removing(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+@@ -2622,8 +2646,11 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
+  *
+  */
+ static void
+-csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
++csio_hws_pcierr(struct csio_sm *_hw, uint32_t _evt)
+ {
++      struct csio_hw *hw = container_of(_hw, struct csio_hw, sm);
++      enum csio_hw_ev evt = _evt;
++
+       hw->prev_evt = hw->cur_evt;
+       hw->cur_evt = evt;
+       CSIO_INC_STATS(hw, n_evt_sm[evt]);
+diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
+index dbe416f..25a9a5b 100644
+--- a/drivers/scsi/csiostor/csio_init.c
++++ b/drivers/scsi/csiostor/csio_init.c
+@@ -1053,7 +1053,7 @@ static void csio_remove_one(struct pci_dev *pdev)
+  *
+  */
+ static pci_ers_result_t
+-csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++csio_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct csio_hw *hw = pci_get_drvdata(pdev);
+diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
+index c00b2ff..da72dbc 100644
+--- a/drivers/scsi/csiostor/csio_lnode.c
++++ b/drivers/scsi/csiostor/csio_lnode.c
+@@ -55,10 +55,10 @@ int csio_fdmi_enable = 1;
+ #define PORT_ID_PTR(_x)         ((uint8_t *)(&_x) + 1)
+ /* Lnode SM declarations */
+-static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
+-static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
+-static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
+-static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
++static void csio_lns_uninit(struct csio_sm *, uint32_t);
++static void csio_lns_online(struct csio_sm *, uint32_t);
++static void csio_lns_ready(struct csio_sm *, uint32_t);
++static void csio_lns_offline(struct csio_sm *, uint32_t);
+ static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
+               void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+@@ -1077,7 +1077,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ int
+ csio_is_lnode_ready(struct csio_lnode *ln)
+ {
+-      return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
++      return (csio_get_state(&ln->sm) == csio_lns_ready);
+ }
+ /*****************************************************************************/
+@@ -1093,8 +1093,10 @@ csio_is_lnode_ready(struct csio_lnode *ln)
+  * Return - none.
+  */
+ static void
+-csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
++csio_lns_uninit(struct csio_sm *_ln, uint32_t _evt)
+ {
++      struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm);
++      enum csio_ln_ev evt = _evt;
+       struct csio_hw *hw = csio_lnode_to_hw(ln);
+       struct csio_lnode *rln = hw->rln;
+       int rv;
+@@ -1146,8 +1148,10 @@ csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
+  * Return - none.
+  */
+ static void
+-csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
++csio_lns_online(struct csio_sm *_ln, uint32_t _evt)
+ {
++      struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm);
++      enum csio_ln_ev evt = _evt;
+       struct csio_hw *hw = csio_lnode_to_hw(ln);
+       CSIO_INC_STATS(ln, n_evt_sm[evt]);
+@@ -1198,8 +1202,10 @@ csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
+  * Return - none.
+  */
+ static void
+-csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
++csio_lns_ready(struct csio_sm *_ln, uint32_t _evt)
+ {
++      struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm);
++      enum csio_ln_ev evt = _evt;
+       struct csio_hw *hw = csio_lnode_to_hw(ln);
+       CSIO_INC_STATS(ln, n_evt_sm[evt]);
+@@ -1272,8 +1278,10 @@ csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
+  * Return - none.
+  */
+ static void
+-csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
++csio_lns_offline(struct csio_sm *_ln, uint32_t _evt)
+ {
++      struct csio_lnode *ln = container_of(_ln, struct csio_lnode, sm);
++      enum csio_ln_ev evt = _evt;
+       struct csio_hw *hw = csio_lnode_to_hw(ln);
+       struct csio_lnode *rln = hw->rln;
+       int rv;
+@@ -1349,15 +1357,15 @@ csio_free_fcfinfo(struct kref *kref)
+ void
+ csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+ {
+-      if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
++      if (csio_get_state(&ln->sm) == csio_lns_uninit) {
+               strcpy(str, "UNINIT");
+               return;
+       }
+-      if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
++      if (csio_get_state(&ln->sm) == csio_lns_ready) {
+               strcpy(str, "READY");
+               return;
+       }
+-      if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
++      if (csio_get_state(&ln->sm) == csio_lns_offline) {
+               strcpy(str, "OFFLINE");
+               return;
+       }
+diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
+index e9c3b04..4ba3a59 100644
+--- a/drivers/scsi/csiostor/csio_rnode.c
++++ b/drivers/scsi/csiostor/csio_rnode.c
+@@ -46,10 +46,10 @@ static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
+ static void csio_rnode_exit(struct csio_rnode *);
+ /* Static machine forward declarations */
+-static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
+-static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
+-static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
+-static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
++static void csio_rns_uninit(struct csio_sm *, uint32_t);
++static void csio_rns_ready(struct csio_sm *, uint32_t);
++static void csio_rns_offline(struct csio_sm *, uint32_t);
++static void csio_rns_disappeared(struct csio_sm *, uint32_t);
+ /* RNF event mapping */
+ static enum csio_rn_ev fwevt_to_rnevt[] = {
+@@ -88,13 +88,13 @@ static enum csio_rn_ev fwevt_to_rnevt[] = {
+ int
+ csio_is_rnode_ready(struct csio_rnode *rn)
+ {
+-      return csio_match_state(rn, csio_rns_ready);
++      return csio_match_state(&rn->sm, csio_rns_ready);
+ }
+ static int
+ csio_is_rnode_uninit(struct csio_rnode *rn)
+ {
+-      return csio_match_state(rn, csio_rns_uninit);
++      return csio_match_state(&rn->sm, csio_rns_uninit);
+ }
+ static int
+@@ -601,8 +601,10 @@ __csio_unreg_rnode(struct csio_rnode *rn)
+  *
+  */
+ static void
+-csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
++csio_rns_uninit(struct csio_sm *_rn, uint32_t _evt)
+ {
++      struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm);
++      enum csio_rn_ev evt = _evt;
+       struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+       int ret = 0;
+@@ -641,8 +643,10 @@ csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
+  *
+  */
+ static void
+-csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
++csio_rns_ready(struct csio_sm *_rn, uint32_t _evt)
+ {
++      struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm);
++      enum csio_rn_ev evt = _evt;
+       struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+       int ret = 0;
+@@ -726,8 +730,10 @@ csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
+  *
+  */
+ static void
+-csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
++csio_rns_offline(struct csio_sm *_rn, uint32_t _evt)
+ {
++      struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm);
++      enum csio_rn_ev evt = _evt;
+       struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+       int ret = 0;
+@@ -785,8 +791,10 @@ csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
+  *
+  */
+ static void
+-csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
++csio_rns_disappeared(struct csio_sm *_rn, uint32_t _evt)
+ {
++      struct csio_rnode *rn = container_of(_rn, struct csio_rnode, sm);
++      enum csio_rn_ev evt = _evt;
+       struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+       int ret = 0;
+diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
+index c2a6f9f..5a37cc4 100644
+--- a/drivers/scsi/csiostor/csio_scsi.c
++++ b/drivers/scsi/csiostor/csio_scsi.c
+@@ -65,12 +65,12 @@ static int csio_ddp_descs = 128;
+ static int csio_do_abrt_cls(struct csio_hw *,
+                                     struct csio_ioreq *, bool);
+-static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
+-static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
+-static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
+-static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
+-static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
+-static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
++static void csio_scsis_uninit(struct csio_sm *, uint32_t);
++static void csio_scsis_io_active(struct csio_sm *, uint32_t);
++static void csio_scsis_tm_active(struct csio_sm *, uint32_t);
++static void csio_scsis_aborting(struct csio_sm *, uint32_t);
++static void csio_scsis_closing(struct csio_sm *, uint32_t);
++static void csio_scsis_shost_cmpl_await(struct csio_sm *, uint32_t);
+ /*
+  * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
+@@ -700,8 +700,10 @@ csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
+ /* START: SCSI SM                                                            */
+ /*****************************************************************************/
+ static void
+-csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
++csio_scsis_uninit(struct csio_sm *_req, uint32_t _evt)
+ {
++      struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm);
++      enum csio_scsi_ev evt = _evt;
+       struct csio_hw *hw = req->lnode->hwp;
+       struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+@@ -770,8 +772,10 @@ csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
+ }
+ static void
+-csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
++csio_scsis_io_active(struct csio_sm *_req, uint32_t _evt)
+ {
++      struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm);
++      enum csio_scsi_ev evt = _evt;
+       struct csio_hw *hw = req->lnode->hwp;
+       struct csio_scsim *scm = csio_hw_to_scsim(hw);
+       struct csio_rnode *rn;
+@@ -842,8 +846,10 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+ }
+ static void
+-csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
++csio_scsis_tm_active(struct csio_sm *_req, uint32_t _evt)
+ {
++      struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm);
++      enum csio_scsi_ev evt = _evt;
+       struct csio_hw *hw = req->lnode->hwp;
+       struct csio_scsim *scm = csio_hw_to_scsim(hw);
+@@ -885,8 +891,10 @@ csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+ }
+ static void
+-csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
++csio_scsis_aborting(struct csio_sm *_req, uint32_t _evt)
+ {
++      struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm);
++      enum csio_scsi_ev evt = _evt;
+       struct csio_hw *hw = req->lnode->hwp;
+       struct csio_scsim *scm = csio_hw_to_scsim(hw);
+@@ -982,8 +990,10 @@ csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
+ }
+ static void
+-csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
++csio_scsis_closing(struct csio_sm *_req, uint32_t _evt)
+ {
++      struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm);
++      enum csio_scsi_ev evt = _evt;
+       struct csio_hw *hw = req->lnode->hwp;
+       struct csio_scsim *scm = csio_hw_to_scsim(hw);
+@@ -1046,8 +1056,11 @@ csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
+ }
+ static void
+-csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
++csio_scsis_shost_cmpl_await(struct csio_sm *_req, uint32_t _evt)
+ {
++      struct csio_ioreq *req = container_of(_req, struct csio_ioreq, sm);
++      enum csio_scsi_ev evt = _evt;
++
+       switch (evt) {
+       case CSIO_SCSIE_ABORT:
+       case CSIO_SCSIE_CLOSE:
+diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
+index 78ce4d61..b3f6ff60b 100644
+--- a/drivers/scsi/esas2r/esas2r_init.c
++++ b/drivers/scsi/esas2r/esas2r_init.c
+@@ -237,7 +237,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
+               flags |= IRQF_SHARED;
+       esas2r_log(ESAS2R_LOG_INFO,
+-                 "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
++                 "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
+                  a->pcid->irq, a, a->name, flags);
+       if (request_irq(a->pcid->irq,
+diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
+index 3e84834..34976f9 100644
+--- a/drivers/scsi/esas2r/esas2r_ioctl.c
++++ b/drivers/scsi/esas2r/esas2r_ioctl.c
+@@ -1301,7 +1301,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+       ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
+       if (ioctl == NULL) {
+               esas2r_log(ESAS2R_LOG_WARN,
+-                         "ioctl_handler kzalloc failed for %d bytes",
++                         "ioctl_handler kzalloc failed for %lu bytes",
+                          sizeof(struct atto_express_ioctl));
+               return -ENOMEM;
+       }
+diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
+index 7b6397b..75b9d23 100644
+--- a/drivers/scsi/esas2r/esas2r_log.h
++++ b/drivers/scsi/esas2r/esas2r_log.h
+@@ -61,8 +61,8 @@ enum {
+ #endif
+ };
+-int esas2r_log(const long level, const char *format, ...);
+-int esas2r_log_dev(const long level,
++__printf(2, 3) int esas2r_log(const long level, const char *format, ...);
++__printf(3, 4) int esas2r_log_dev(const long level,
+                  const struct device *dev,
+                  const char *format,
+                  ...);
+diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
+index 2aca4d1..cdee863 100644
+--- a/drivers/scsi/esas2r/esas2r_main.c
++++ b/drivers/scsi/esas2r/esas2r_main.c
+@@ -198,7 +198,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
+                                             GFP_KERNEL);
+               if (a->local_atto_ioctl == NULL) {
+                       esas2r_log(ESAS2R_LOG_WARN,
+-                                 "write_hw kzalloc failed for %d bytes",
++                                 "write_hw kzalloc failed for %lu bytes",
+                                  sizeof(struct atto_ioctl));
+                       return -ENOMEM;
+               }
+@@ -1186,7 +1186,7 @@ retry:
+               } else {
+                       esas2r_log(ESAS2R_LOG_CRIT,
+                                  "unable to allocate a request for a "
+-                                 "device reset (%d:%d)!",
++                                 "device reset (%d:%llu)!",
+                                  cmd->device->id,
+                                  cmd->device->lun);
+               }
+diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
+index 0675fd1..bbebe90 100644
+--- a/drivers/scsi/fcoe/fcoe_sysfs.c
++++ b/drivers/scsi/fcoe/fcoe_sysfs.c
+@@ -33,8 +33,8 @@
+  */
+ #include "libfcoe.h"
+-static atomic_t ctlr_num;
+-static atomic_t fcf_num;
++static atomic_unchecked_t ctlr_num;
++static atomic_unchecked_t fcf_num;
+ /*
+  * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+@@ -724,7 +724,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+       if (!ctlr)
+               goto out;
+-      ctlr->id = atomic_inc_return(&ctlr_num) - 1;
++      ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
+       ctlr->f = f;
+       ctlr->mode = FIP_CONN_TYPE_FABRIC;
+       INIT_LIST_HEAD(&ctlr->fcfs);
+@@ -941,7 +941,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+       fcf->dev.parent = &ctlr->dev;
+       fcf->dev.bus = &fcoe_bus_type;
+       fcf->dev.type = &fcoe_fcf_device_type;
+-      fcf->id = atomic_inc_return(&fcf_num) - 1;
++      fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
+       fcf->state = FCOE_FCF_STATE_UNKNOWN;
+       fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+@@ -977,8 +977,8 @@ int __init fcoe_sysfs_setup(void)
+ {
+       int error;
+-      atomic_set(&ctlr_num, 0);
+-      atomic_set(&fcf_num, 0);
++      atomic_set_unchecked(&ctlr_num, 0);
++      atomic_set_unchecked(&fcf_num, 0);
+       error = bus_register(&fcoe_bus_type);
+       if (error)
+diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
+index 7028dd3..7392dc6 100644
+--- a/drivers/scsi/fcoe/fcoe_transport.c
++++ b/drivers/scsi/fcoe/fcoe_transport.c
+@@ -32,13 +32,13 @@ MODULE_AUTHOR("Open-FCoE.org");
+ MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+ MODULE_LICENSE("GPL v2");
+-static int fcoe_transport_create(const char *, struct kernel_param *);
+-static int fcoe_transport_destroy(const char *, struct kernel_param *);
++static int fcoe_transport_create(const char *, const struct kernel_param *);
++static int fcoe_transport_destroy(const char *, const struct kernel_param *);
+ static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+ static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+ static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+-static int fcoe_transport_enable(const char *, struct kernel_param *);
+-static int fcoe_transport_disable(const char *, struct kernel_param *);
++static int fcoe_transport_enable(const char *, const struct kernel_param *);
++static int fcoe_transport_disable(const char *, const struct kernel_param *);
+ static int libfcoe_device_notification(struct notifier_block *notifier,
+                                   ulong event, void *ptr);
+@@ -846,7 +846,7 @@ EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
+  *
+  * Returns: 0 for success
+  */
+-static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
++static int fcoe_transport_create(const char *buffer, const struct kernel_param *kp)
+ {
+       int rc = -ENODEV;
+       struct net_device *netdev = NULL;
+@@ -911,7 +911,7 @@ out_nodev:
+  *
+  * Returns: 0 for success
+  */
+-static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
++static int fcoe_transport_destroy(const char *buffer, const struct kernel_param *kp)
+ {
+       int rc = -ENODEV;
+       struct net_device *netdev = NULL;
+@@ -955,7 +955,7 @@ out_nodev:
+  *
+  * Returns: 0 for success
+  */
+-static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
++static int fcoe_transport_disable(const char *buffer, const struct kernel_param *kp)
+ {
+       int rc = -ENODEV;
+       struct net_device *netdev = NULL;
+@@ -989,7 +989,7 @@ out_nodev:
+  *
+  * Returns: 0 for success
+  */
+-static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
++static int fcoe_transport_enable(const char *buffer, const struct kernel_param *kp)
+ {
+       int rc = -ENODEV;
+       struct net_device *netdev = NULL;
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 030d002..cbf90d1 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -942,10 +942,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
+       struct reply_queue_buffer *rq = &h->reply_queue[q];
+       if (h->transMethod & CFGTBL_Trans_io_accel1)
+-              return h->access.command_completed(h, q);
++              return h->access->command_completed(h, q);
+       if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+-              return h->access.command_completed(h, q);
++              return h->access->command_completed(h, q);
+       if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+               a = rq->head[rq->current_entry];
+@@ -1127,7 +1127,7 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
+               break;
+       default:
+               set_performant_mode(h, c, reply_queue);
+-              h->access.submit_command(h, c);
++              h->access->submit_command(h, c);
+       }
+ }
+@@ -7020,17 +7020,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
+ static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
+ {
+-      return h->access.command_completed(h, q);
++      return h->access->command_completed(h, q);
+ }
+ static inline bool interrupt_pending(struct ctlr_info *h)
+ {
+-      return h->access.intr_pending(h);
++      return h->access->intr_pending(h);
+ }
+ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ {
+-      return (h->access.intr_pending(h) == 0) ||
++      return (h->access->intr_pending(h) == 0) ||
+               (h->interrupts_enabled == 0);
+ }
+@@ -7958,7 +7958,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
+       if (prod_index < 0)
+               return prod_index;
+       h->product_name = products[prod_index].product_name;
+-      h->access = *(products[prod_index].access);
++      h->access = products[prod_index].access;
+       h->needs_abort_tags_swizzled =
+               ctlr_needs_abort_tags_swizzled(h->board_id);
+@@ -8357,7 +8357,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
+       unsigned long flags;
+       u32 lockup_detected;
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+       spin_lock_irqsave(&h->lock, flags);
+       lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+       if (!lockup_detected) {
+@@ -8695,7 +8695,7 @@ reinit_after_soft_reset:
+       }
+       /* make sure the board interrupts are off */
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+       rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
+       if (rc)
+@@ -8748,7 +8748,7 @@ reinit_after_soft_reset:
+                * fake ones to scoop up any residual completions.
+                */
+               spin_lock_irqsave(&h->lock, flags);
+-              h->access.set_intr_mask(h, HPSA_INTR_OFF);
++              h->access->set_intr_mask(h, HPSA_INTR_OFF);
+               spin_unlock_irqrestore(&h->lock, flags);
+               hpsa_free_irqs(h);
+               rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
+@@ -8778,9 +8778,9 @@ reinit_after_soft_reset:
+               dev_info(&h->pdev->dev, "Board READY.\n");
+               dev_info(&h->pdev->dev,
+                       "Waiting for stale completions to drain.\n");
+-              h->access.set_intr_mask(h, HPSA_INTR_ON);
++              h->access->set_intr_mask(h, HPSA_INTR_ON);
+               msleep(10000);
+-              h->access.set_intr_mask(h, HPSA_INTR_OFF);
++              h->access->set_intr_mask(h, HPSA_INTR_OFF);
+               rc = controller_reset_failed(h->cfgtable);
+               if (rc)
+@@ -8807,7 +8807,7 @@ reinit_after_soft_reset:
+       /* Turn the interrupts on so we can service requests */
+-      h->access.set_intr_mask(h, HPSA_INTR_ON);
++      h->access->set_intr_mask(h, HPSA_INTR_ON);
+       hpsa_hba_inquiry(h);
+@@ -8833,7 +8833,7 @@ reinit_after_soft_reset:
+ clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+       hpsa_free_performant_mode(h);
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
+       hpsa_free_sg_chain_blocks(h);
+ clean5: /* cmd, irq, shost, pci, lu, aer/h */
+@@ -8968,7 +8968,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+        * To write all data in the battery backed cache to disks
+        */
+       hpsa_flush_cache(h);
+-      h->access.set_intr_mask(h, HPSA_INTR_OFF);
++      h->access->set_intr_mask(h, HPSA_INTR_OFF);
+       hpsa_free_irqs(h);                      /* init_one 4 */
+       hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
+ }
+@@ -9110,7 +9110,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+                               CFGTBL_Trans_enable_directed_msix |
+                       (trans_support & (CFGTBL_Trans_io_accel1 |
+                               CFGTBL_Trans_io_accel2));
+-      struct access_method access = SA5_performant_access;
++      struct access_method *access = &SA5_performant_access;
+       /* This is a bit complicated.  There are 8 registers on
+        * the controller which we write to to tell it 8 different
+@@ -9152,7 +9152,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+        * perform the superfluous readl() after each command submission.
+        */
+       if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
+-              access = SA5_performant_access_no_read;
++              access = &SA5_performant_access_no_read;
+       /* Controller spec: zero out this buffer. */
+       for (i = 0; i < h->nreply_queues; i++)
+@@ -9182,12 +9182,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+        * enable outbound interrupt coalescing in accelerator mode;
+        */
+       if (trans_support & CFGTBL_Trans_io_accel1) {
+-              access = SA5_ioaccel_mode1_access;
++              access = &SA5_ioaccel_mode1_access;
+               writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+               writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+       } else {
+               if (trans_support & CFGTBL_Trans_io_accel2) {
+-                      access = SA5_ioaccel_mode2_access;
++                      access = &SA5_ioaccel_mode2_access;
+                       writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+                       writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+               }
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index a1487e6..53a2c5d 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -179,7 +179,7 @@ struct ctlr_info {
+       unsigned int msix_vector;
+       unsigned int msi_vector;
+       int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
+-      struct access_method access;
++      struct access_method *access;
+       /* queue and queue Info */
+       unsigned int Qdepth;
+@@ -579,38 +579,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+ }
+ static struct access_method SA5_access = {
+-      SA5_submit_command,
+-      SA5_intr_mask,
+-      SA5_intr_pending,
+-      SA5_completed,
++      .submit_command = SA5_submit_command,
++      .set_intr_mask = SA5_intr_mask,
++      .intr_pending = SA5_intr_pending,
++      .command_completed = SA5_completed,
+ };
+ static struct access_method SA5_ioaccel_mode1_access = {
+-      SA5_submit_command,
+-      SA5_performant_intr_mask,
+-      SA5_ioaccel_mode1_intr_pending,
+-      SA5_ioaccel_mode1_completed,
++      .submit_command = SA5_submit_command,
++      .set_intr_mask = SA5_performant_intr_mask,
++      .intr_pending = SA5_ioaccel_mode1_intr_pending,
++      .command_completed = SA5_ioaccel_mode1_completed,
+ };
+ static struct access_method SA5_ioaccel_mode2_access = {
+-      SA5_submit_command_ioaccel2,
+-      SA5_performant_intr_mask,
+-      SA5_performant_intr_pending,
+-      SA5_performant_completed,
++      .submit_command = SA5_submit_command_ioaccel2,
++      .set_intr_mask = SA5_performant_intr_mask,
++      .intr_pending = SA5_performant_intr_pending,
++      .command_completed = SA5_performant_completed,
+ };
+ static struct access_method SA5_performant_access = {
+-      SA5_submit_command,
+-      SA5_performant_intr_mask,
+-      SA5_performant_intr_pending,
+-      SA5_performant_completed,
++      .submit_command = SA5_submit_command,
++      .set_intr_mask = SA5_performant_intr_mask,
++      .intr_pending = SA5_performant_intr_pending,
++      .command_completed = SA5_performant_completed,
+ };
+ static struct access_method SA5_performant_access_no_read = {
+-      SA5_submit_command_no_read,
+-      SA5_performant_intr_mask,
+-      SA5_performant_intr_pending,
+-      SA5_performant_completed,
++      .submit_command = SA5_submit_command_no_read,
++      .set_intr_mask = SA5_performant_intr_mask,
++      .intr_pending = SA5_performant_intr_pending,
++      .command_completed = SA5_performant_completed,
+ };
+ struct board_type {
+diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
+index a83f705..b40c5e6 100644
+--- a/drivers/scsi/hptiop.c
++++ b/drivers/scsi/hptiop.c
+@@ -1082,7 +1082,6 @@ static const char *hptiop_info(struct Scsi_Host *host)
+ static int hptiop_reset_hba(struct hptiop_hba *hba)
+ {
+       if (atomic_xchg(&hba->resetting, 1) == 0) {
+-              atomic_inc(&hba->reset_count);
+               hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
+       }
+@@ -1340,7 +1339,6 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+       hba->iopintf_v2 = 0;
+       atomic_set(&hba->resetting, 0);
+-      atomic_set(&hba->reset_count, 0);
+       init_waitqueue_head(&hba->reset_wq);
+       init_waitqueue_head(&hba->ioctl_wq);
+diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
+index 4d1c511..d5744cb 100644
+--- a/drivers/scsi/hptiop.h
++++ b/drivers/scsi/hptiop.h
+@@ -330,7 +330,6 @@ struct hptiop_hba {
+       void        *dma_coherent[HPTIOP_MAX_REQUESTS];
+       dma_addr_t  dma_coherent_handle[HPTIOP_MAX_REQUESTS];
+-      atomic_t    reset_count;
+       atomic_t    resetting;
+       wait_queue_head_t reset_wq;
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 17d04c7..17a2948 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -948,7 +948,7 @@ static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
+  **/
+ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
+                      void (*done) (struct ipr_cmnd *),
+-                     void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
++                     void (*timeout_func) (unsigned long), u32 timeout)
+ {
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+@@ -956,7 +956,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + timeout;
+-      ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
++      ipr_cmd->timer.function = timeout_func;
+       add_timer(&ipr_cmd->timer);
+@@ -1038,7 +1038,7 @@ static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
+  *    none
+  **/
+ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
+-                                void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
++                                void (*timeout_func) (unsigned long ipr_cmd),
+                                 u32 timeout)
+ {
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+@@ -1058,7 +1058,7 @@ static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+       if (ioa_cfg->hrrq_num == 1)
+               hrrq = 0;
+       else {
+-              hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
++              hrrq = atomic_add_return_unchecked(1, &ioa_cfg->hrrq_index);
+               hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+       }
+       return hrrq;
+@@ -2601,8 +2601,9 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
+  * Return value:
+  *    none
+  **/
+-static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
++static void ipr_timeout(unsigned long _ipr_cmd)
+ {
++      struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd;
+       unsigned long lock_flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+@@ -2633,8 +2634,9 @@ static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
+  * Return value:
+  *    none
+  **/
+-static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
++static void ipr_oper_timeout(unsigned long _ipr_cmd)
+ {
++      struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd;
+       unsigned long lock_flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+@@ -5269,8 +5271,9 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
+  * Return value:
+  *    none
+  **/
+-static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
++static void ipr_abort_timeout(unsigned long _ipr_cmd)
+ {
++      struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd;
+       struct ipr_cmnd *reset_cmd;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_cmd_pkt *cmd_pkt;
+@@ -8042,8 +8045,9 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
+  * Return value:
+  *    none
+  **/
+-static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
++static void ipr_reset_timer_done(unsigned long _ipr_cmd)
+ {
++      struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)_ipr_cmd;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       unsigned long lock_flags = 0;
+@@ -8081,7 +8085,7 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + timeout;
+-      ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
++      ipr_cmd->timer.function = ipr_reset_timer_done;
+       add_timer(&ipr_cmd->timer);
+ }
+@@ -8111,9 +8115,9 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
+       ioa_cfg->identify_hrrq_index = 0;
+       if (ioa_cfg->hrrq_num == 1)
+-              atomic_set(&ioa_cfg->hrrq_index, 0);
++              atomic_set_unchecked(&ioa_cfg->hrrq_index, 0);
+       else
+-              atomic_set(&ioa_cfg->hrrq_index, 1);
++              atomic_set_unchecked(&ioa_cfg->hrrq_index, 1);
+       /* Zero out config table */
+       memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
+@@ -8167,7 +8171,7 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + stage_time * HZ;
+-      ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
++      ipr_cmd->timer.function = ipr_oper_timeout;
+       ipr_cmd->done = ipr_reset_ioa_job;
+       add_timer(&ipr_cmd->timer);
+@@ -8239,7 +8243,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
+-      ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
++      ipr_cmd->timer.function = ipr_oper_timeout;
+       ipr_cmd->done = ipr_reset_ioa_job;
+       add_timer(&ipr_cmd->timer);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+@@ -9227,7 +9231,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
+  *    PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+  */
+ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
+-                                             pci_channel_state_t state)
++                                             enum pci_channel_state state)
+ {
+       switch (state) {
+       case pci_channel_io_frozen:
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index cdb5196..f7e8eff 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1539,7 +1539,7 @@ struct ipr_ioa_cfg {
+       struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+       u32 hrrq_num;
+-      atomic_t  hrrq_index;
++      atomic_unchecked_t  hrrq_index;
+       u16 identify_hrrq_index;
+       struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index e72673b..977ed14 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -101,12 +101,12 @@ struct fc_exch_mgr {
+       u16             pool_max_index;
+       struct {
+-              atomic_t no_free_exch;
+-              atomic_t no_free_exch_xid;
+-              atomic_t xid_not_found;
+-              atomic_t xid_busy;
+-              atomic_t seq_not_found;
+-              atomic_t non_bls_resp;
++              atomic_unchecked_t no_free_exch;
++              atomic_unchecked_t no_free_exch_xid;
++              atomic_unchecked_t xid_not_found;
++              atomic_unchecked_t xid_busy;
++              atomic_unchecked_t seq_not_found;
++              atomic_unchecked_t non_bls_resp;
+       } stats;
+ };
+@@ -809,7 +809,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+       /* allocate memory for exchange */
+       ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+       if (!ep) {
+-              atomic_inc(&mp->stats.no_free_exch);
++              atomic_inc_unchecked(&mp->stats.no_free_exch);
+               goto out;
+       }
+       memset(ep, 0, sizeof(*ep));
+@@ -872,7 +872,7 @@ out:
+       return ep;
+ err:
+       spin_unlock_bh(&pool->lock);
+-      atomic_inc(&mp->stats.no_free_exch_xid);
++      atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
+       mempool_free(ep, mp->ep_pool);
+       return NULL;
+ }
+@@ -1029,7 +1029,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+               xid = ntohs(fh->fh_ox_id);      /* we originated exch */
+               ep = fc_exch_find(mp, xid);
+               if (!ep) {
+-                      atomic_inc(&mp->stats.xid_not_found);
++                      atomic_inc_unchecked(&mp->stats.xid_not_found);
+                       reject = FC_RJT_OX_ID;
+                       goto out;
+               }
+@@ -1059,7 +1059,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+               ep = fc_exch_find(mp, xid);
+               if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+                       if (ep) {
+-                              atomic_inc(&mp->stats.xid_busy);
++                              atomic_inc_unchecked(&mp->stats.xid_busy);
+                               reject = FC_RJT_RX_ID;
+                               goto rel;
+                       }
+@@ -1070,7 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+                       }
+                       xid = ep->xid;  /* get our XID */
+               } else if (!ep) {
+-                      atomic_inc(&mp->stats.xid_not_found);
++                      atomic_inc_unchecked(&mp->stats.xid_not_found);
+                       reject = FC_RJT_RX_ID;  /* XID not found */
+                       goto out;
+               }
+@@ -1088,7 +1088,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+       } else {
+               sp = &ep->seq;
+               if (sp->id != fh->fh_seq_id) {
+-                      atomic_inc(&mp->stats.seq_not_found);
++                      atomic_inc_unchecked(&mp->stats.seq_not_found);
+                       if (f_ctl & FC_FC_END_SEQ) {
+                               /*
+                                * Update sequence_id based on incoming last
+@@ -1539,22 +1539,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+       ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+       if (!ep) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto out;
+       }
+       if (ep->esb_stat & ESB_ST_COMPLETE) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto rel;
+       }
+       if (ep->rxid == FC_XID_UNKNOWN)
+               ep->rxid = ntohs(fh->fh_rx_id);
+       if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto rel;
+       }
+       if (ep->did != ntoh24(fh->fh_s_id) &&
+           ep->did != FC_FID_FLOGI) {
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+               goto rel;
+       }
+       sof = fr_sof(fp);
+@@ -1563,7 +1563,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+               sp->ssb_stat |= SSB_ST_RESP;
+               sp->id = fh->fh_seq_id;
+       } else if (sp->id != fh->fh_seq_id) {
+-              atomic_inc(&mp->stats.seq_not_found);
++              atomic_inc_unchecked(&mp->stats.seq_not_found);
+               goto rel;
+       }
+@@ -1626,9 +1626,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+       sp = fc_seq_lookup_orig(mp, fp);        /* doesn't hold sequence */
+       if (!sp)
+-              atomic_inc(&mp->stats.xid_not_found);
++              atomic_inc_unchecked(&mp->stats.xid_not_found);
+       else
+-              atomic_inc(&mp->stats.non_bls_resp);
++              atomic_inc_unchecked(&mp->stats.non_bls_resp);
+       fc_frame_free(fp);
+ }
+@@ -2269,13 +2269,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
+       list_for_each_entry(ema, &lport->ema_list, ema_list) {
+               mp = ema->mp;
+-              st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
++              st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
+               st->fc_no_free_exch_xid +=
+-                              atomic_read(&mp->stats.no_free_exch_xid);
+-              st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
+-              st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
+-              st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
+-              st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
++                              atomic_read_unchecked(&mp->stats.no_free_exch_xid);
++              st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
++              st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
++              st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
++              st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
+       }
+ }
+ EXPORT_SYMBOL(fc_exch_update_stats);
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 763f012..641a55a 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -532,7 +532,7 @@ static struct ata_port_operations sas_sata_ops = {
+       .postreset              = ata_std_postreset,
+       .error_handler          = ata_std_error_handler,
+       .post_internal_cmd      = sas_ata_post_internal,
+-      .qc_defer               = ata_std_qc_defer,
++      .qc_defer               = ata_std_qc_defer,
+       .qc_prep                = ata_noop_qc_prep,
+       .qc_issue               = sas_ata_qc_issue,
+       .qc_fill_rtf            = sas_ata_qc_fill_rtf,
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index b484859..1ea4db4 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -430,7 +430,7 @@ struct lpfc_vport {
+       struct dentry *debug_nodelist;
+       struct dentry *vport_debugfs_root;
+       struct lpfc_debugfs_trc *disc_trc;
+-      atomic_t disc_trc_cnt;
++      atomic_unchecked_t disc_trc_cnt;
+ #endif
+       uint8_t stat_data_enabled;
+       uint8_t stat_data_blocked;
+@@ -898,8 +898,8 @@ struct lpfc_hba {
+       struct timer_list fabric_block_timer;
+       unsigned long bit_flags;
+ #define       FABRIC_COMANDS_BLOCKED  0
+-      atomic_t num_rsrc_err;
+-      atomic_t num_cmd_success;
++      atomic_unchecked_t num_rsrc_err;
++      atomic_unchecked_t num_cmd_success;
+       unsigned long last_rsrc_error_time;
+       unsigned long last_ramp_down_time;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+@@ -934,7 +934,7 @@ struct lpfc_hba {
+       struct dentry *debug_slow_ring_trc;
+       struct lpfc_debugfs_trc *slow_ring_trc;
+-      atomic_t slow_ring_trc_cnt;
++      atomic_unchecked_t slow_ring_trc_cnt;
+       /* iDiag debugfs sub-directory */
+       struct dentry *idiag_root;
+       struct dentry *idiag_pci_cfg;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index a63542b..80692ee 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+ #include <linux/debugfs.h>
+-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+ static unsigned long lpfc_debugfs_start_time = 0L;
+ /* iDiag */
+@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+       lpfc_debugfs_enable = 0;
+       len = 0;
+-      index = (atomic_read(&vport->disc_trc_cnt) + 1) &
++      index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
+               (lpfc_debugfs_max_disc_trc - 1);
+       for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+               dtp = vport->disc_trc + i;
+@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
+       lpfc_debugfs_enable = 0;
+       len = 0;
+-      index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
++      index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
+               (lpfc_debugfs_max_slow_ring_trc - 1);
+       for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+               dtp = phba->slow_ring_trc + i;
+@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+               !vport || !vport->disc_trc)
+               return;
+-      index = atomic_inc_return(&vport->disc_trc_cnt) &
++      index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
+               (lpfc_debugfs_max_disc_trc - 1);
+       dtp = vport->disc_trc + index;
+       dtp->fmt = fmt;
+       dtp->data1 = data1;
+       dtp->data2 = data2;
+       dtp->data3 = data3;
+-      dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++      dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+       dtp->jif = jiffies;
+ #endif
+       return;
+@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
+               !phba || !phba->slow_ring_trc)
+               return;
+-      index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
++      index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
+               (lpfc_debugfs_max_slow_ring_trc - 1);
+       dtp = phba->slow_ring_trc + index;
+       dtp->fmt = fmt;
+       dtp->data1 = data1;
+       dtp->data2 = data2;
+       dtp->data3 = data3;
+-      dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++      dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+       dtp->jif = jiffies;
+ #endif
+       return;
+@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+                                                "slow_ring buffer\n");
+                               goto debug_failed;
+                       }
+-                      atomic_set(&phba->slow_ring_trc_cnt, 0);
++                      atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
+                       memset(phba->slow_ring_trc, 0,
+                               (sizeof(struct lpfc_debugfs_trc) *
+                               lpfc_debugfs_max_slow_ring_trc));
+@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+                                "buffer\n");
+               goto debug_failed;
+       }
+-      atomic_set(&vport->disc_trc_cnt, 0);
++      atomic_set_unchecked(&vport->disc_trc_cnt, 0);
+       snprintf(name, sizeof(name), "discovery_trace");
+       vport->debug_disc_trc =
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 734a042..5f4c380 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11127,7 +11127,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
+  *    PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+  **/
+ static pci_ers_result_t
+-lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++lpfc_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+@@ -11434,8 +11434,10 @@ lpfc_init(void)
+               printk(KERN_ERR "Could not register lpfcmgmt device, "
+                       "misc_register returned with status %d", error);
+-      lpfc_transport_functions.vport_create = lpfc_vport_create;
+-      lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++      pax_open_kernel();
++      const_cast(lpfc_transport_functions.vport_create) = lpfc_vport_create;
++      const_cast(lpfc_transport_functions.vport_delete) = lpfc_vport_delete;
++      pax_close_kernel();
+       lpfc_transport_template =
+                               fc_attach_transport(&lpfc_transport_functions);
+       if (lpfc_transport_template == NULL)
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index d197aa1..c1178a6 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+       unsigned long expires;
+       spin_lock_irqsave(&phba->hbalock, flags);
+-      atomic_inc(&phba->num_rsrc_err);
++      atomic_inc_unchecked(&phba->num_rsrc_err);
+       phba->last_rsrc_error_time = jiffies;
+       expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
+@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+       unsigned long num_rsrc_err, num_cmd_success;
+       int i;
+-      num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+-      num_cmd_success = atomic_read(&phba->num_cmd_success);
++      num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
++      num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
+       /*
+        * The error and success command counters are global per
+@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+                       }
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+-      atomic_set(&phba->num_rsrc_err, 0);
+-      atomic_set(&phba->num_cmd_success, 0);
++      atomic_set_unchecked(&phba->num_rsrc_err, 0);
++      atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+ /**
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index ca86c88..175be62 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -2048,7 +2048,7 @@ struct megasas_instance {
+       s8 init_id;
+       u16 max_num_sge;
+-      u16 max_fw_cmds;
++      u16 max_fw_cmds __intentional_overflow(-1);
+       u16 max_mfi_cmds;
+       u16 max_scsi_cmds;
+       u16 ldio_threshold;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 750f82c..956cdf0 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -105,7 +105,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+  *
+  */
+ static int
+-_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
++_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
+ {
+       int ret = param_set_int(val, kp);
+       struct MPT3SAS_ADAPTER *ioc;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 4cb7990..66bfb63 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -280,7 +280,7 @@ struct _scsi_io_transfer {
+  * Note: The logging levels are defined in mpt3sas_debug.h.
+  */
+ static int
+-_scsih_set_debug_level(const char *val, struct kernel_param *kp)
++_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
+ {
+       int ret = param_set_int(val, kp);
+       struct MPT3SAS_ADAPTER *ioc;
+@@ -8934,7 +8934,7 @@ scsih_resume(struct pci_dev *pdev)
+  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+  */
+ pci_ers_result_t
+-scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++scsih_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
+index b2a88200..d66f0cc 100644
+--- a/drivers/scsi/pmcraid.c
++++ b/drivers/scsi/pmcraid.c
+@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+               res->scsi_dev = scsi_dev;
+               scsi_dev->hostdata = res;
+               res->change_detected = 0;
+-              atomic_set(&res->read_failures, 0);
+-              atomic_set(&res->write_failures, 0);
++              atomic_set_unchecked(&res->read_failures, 0);
++              atomic_set_unchecked(&res->write_failures, 0);
+               rc = 0;
+       }
+       spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+@@ -557,8 +557,9 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
+ static void pmcraid_ioa_reset(struct pmcraid_cmd *);
+-static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
++static void pmcraid_bist_done(unsigned long _cmd)
+ {
++      struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)_cmd;
+       struct pmcraid_instance *pinstance = cmd->drv_inst;
+       unsigned long lock_flags;
+       int rc;
+@@ -573,8 +574,7 @@ static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
+               cmd->timer.expires = jiffies + cmd->time_left;
+               cmd->time_left = 0;
+               cmd->timer.data = (unsigned long)cmd;
+-              cmd->timer.function =
+-                      (void (*)(unsigned long))pmcraid_bist_done;
++              cmd->timer.function = pmcraid_bist_done;
+               add_timer(&cmd->timer);
+       } else {
+               cmd->time_left = 0;
+@@ -607,7 +607,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
+       cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+       cmd->timer.data = (unsigned long)cmd;
+       cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+-      cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
++      cmd->timer.function = pmcraid_bist_done;
+       add_timer(&cmd->timer);
+ }
+@@ -617,8 +617,9 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
+  * Return value
+  *  None
+  */
+-static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
++static void pmcraid_reset_alert_done(unsigned long _cmd)
+ {
++      struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)_cmd;
+       struct pmcraid_instance *pinstance = cmd->drv_inst;
+       u32 status = ioread32(pinstance->ioa_status);
+       unsigned long lock_flags;
+@@ -639,8 +640,7 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
+               cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+               cmd->timer.data = (unsigned long)cmd;
+               cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+-              cmd->timer.function =
+-                      (void (*)(unsigned long))pmcraid_reset_alert_done;
++              cmd->timer.function = pmcraid_reset_alert_done;
+               add_timer(&cmd->timer);
+       }
+ }
+@@ -678,8 +678,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
+               cmd->time_left = PMCRAID_RESET_TIMEOUT;
+               cmd->timer.data = (unsigned long)cmd;
+               cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+-              cmd->timer.function =
+-                      (void (*)(unsigned long))pmcraid_reset_alert_done;
++              cmd->timer.function = pmcraid_reset_alert_done;
+               add_timer(&cmd->timer);
+               iowrite32(DOORBELL_IOA_RESET_ALERT,
+@@ -704,8 +703,9 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
+  * Return value:
+  *   None
+  */
+-static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
++static void pmcraid_timeout_handler(unsigned long _cmd)
+ {
++      struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)_cmd;
+       struct pmcraid_instance *pinstance = cmd->drv_inst;
+       unsigned long lock_flags;
+@@ -920,7 +920,7 @@ static void pmcraid_send_cmd(
+       struct pmcraid_cmd *cmd,
+       void (*cmd_done) (struct pmcraid_cmd *),
+       unsigned long timeout,
+-      void (*timeout_func) (struct pmcraid_cmd *)
++      void (*timeout_func) (unsigned long)
+ )
+ {
+       /* initialize done function */
+@@ -930,7 +930,7 @@ static void pmcraid_send_cmd(
+               /* setup timeout handler */
+               cmd->timer.data = (unsigned long)cmd;
+               cmd->timer.expires = jiffies + timeout;
+-              cmd->timer.function = (void (*)(unsigned long))timeout_func;
++              cmd->timer.function = timeout_func;
+               add_timer(&cmd->timer);
+       }
+@@ -1968,7 +1968,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
+       cmd->timer.data = (unsigned long)cmd;
+       cmd->timer.expires = jiffies +
+                            msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
+-      cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
++      cmd->timer.function = pmcraid_timeout_handler;
+       if (!timer_pending(&cmd->timer))
+               add_timer(&cmd->timer);
+@@ -2641,9 +2641,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
+       /* If this was a SCSI read/write command keep count of errors */
+       if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+-              atomic_inc(&res->read_failures);
++              atomic_inc_unchecked(&res->read_failures);
+       else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+-              atomic_inc(&res->write_failures);
++              atomic_inc_unchecked(&res->write_failures);
+       if (!RES_IS_GSCSI(res->cfg_entry) &&
+               masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+@@ -3469,7 +3469,7 @@ static int pmcraid_queuecommand_lck(
+        * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+        * hrrq_id assigned here in queuecommand
+        */
+-      ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++      ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+                         pinstance->num_hrrq;
+       cmd->cmd_done = pmcraid_io_done;
+@@ -3783,7 +3783,7 @@ static long pmcraid_ioctl_passthrough(
+        * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+        * hrrq_id assigned here in queuecommand
+        */
+-      ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++      ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+                         pinstance->num_hrrq;
+       if (request_size) {
+@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
+       pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+       /* add resources only after host is added into system */
+-      if (!atomic_read(&pinstance->expose_resources))
++      if (!atomic_read_unchecked(&pinstance->expose_resources))
+               return;
+       fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
+       init_waitqueue_head(&pinstance->reset_wait_q);
+       atomic_set(&pinstance->outstanding_cmds, 0);
+-      atomic_set(&pinstance->last_message_id, 0);
+-      atomic_set(&pinstance->expose_resources, 0);
++      atomic_set_unchecked(&pinstance->last_message_id, 0);
++      atomic_set_unchecked(&pinstance->expose_resources, 0);
+       INIT_LIST_HEAD(&pinstance->free_res_q);
+       INIT_LIST_HEAD(&pinstance->used_res_q);
+@@ -5949,7 +5949,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
+       /* Schedule worker thread to handle CCN and take care of adding and
+        * removing devices to OS
+        */
+-      atomic_set(&pinstance->expose_resources, 1);
++      atomic_set_unchecked(&pinstance->expose_resources, 1);
+       schedule_work(&pinstance->worker_q);
+       return rc;
+diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
+index e1d150f..6c6df44 100644
+--- a/drivers/scsi/pmcraid.h
++++ b/drivers/scsi/pmcraid.h
+@@ -748,7 +748,7 @@ struct pmcraid_instance {
+       struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+       /* Message id as filled in last fired IOARCB, used to identify HRRQ */
+-      atomic_t last_message_id;
++      atomic_unchecked_t last_message_id;
+       /* configuration table */
+       struct pmcraid_config_table *cfg_table;
+@@ -777,7 +777,7 @@ struct pmcraid_instance {
+       atomic_t outstanding_cmds;
+       /* should add/delete resources to mid-layer now ?*/
+-      atomic_t expose_resources;
++      atomic_unchecked_t expose_resources;
+@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
+               struct pmcraid_config_table_entry_ext cfg_entry_ext;
+       };
+       struct scsi_device *scsi_dev;   /* Link scsi_device structure */
+-      atomic_t read_failures;         /* count of failed READ commands */
+-      atomic_t write_failures;        /* count of failed WRITE commands */
++      atomic_unchecked_t read_failures;       /* count of failed READ commands */
++      atomic_unchecked_t write_failures;      /* count of failed WRITE commands */
+       /* To indicate add/delete/modify during CCN */
+       u8 change_detected;
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index fe7469c..91e0c0b 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2186,7 +2186,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
+       return 0;
+ }
+-struct fc_function_template qla2xxx_transport_functions = {
++fc_function_template_no_const qla2xxx_transport_functions = {
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+@@ -2234,7 +2234,7 @@ struct fc_function_template qla2xxx_transport_functions = {
+       .bsg_timeout = qla24xx_bsg_timeout,
+ };
+-struct fc_function_template qla2xxx_transport_vport_functions = {
++fc_function_template_no_const qla2xxx_transport_vport_functions = {
+       .show_host_node_name = 1,
+       .show_host_port_name = 1,
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 6ca0081..fbb9efd 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -178,8 +178,8 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+  */
+ extern struct scsi_host_template qla2xxx_driver_template;
+ extern struct scsi_transport_template *qla2xxx_transport_vport_template;
+-extern void qla2x00_timer(scsi_qla_host_t *);
+-extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
++extern void qla2x00_timer(unsigned long);
++extern void qla2x00_start_timer(scsi_qla_host_t *, void (*)(unsigned long), unsigned long);
+ extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
+ extern int qla24xx_disable_vp (scsi_qla_host_t *);
+ extern int qla24xx_enable_vp (scsi_qla_host_t *);
+@@ -583,8 +583,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+ struct device_attribute;
+ extern struct device_attribute *qla2x00_host_attrs[];
+ struct fc_function_template;
+-extern struct fc_function_template qla2xxx_transport_functions;
+-extern struct fc_function_template qla2xxx_transport_vport_functions;
++extern fc_function_template_no_const qla2xxx_transport_functions;
++extern fc_function_template_no_const qla2xxx_transport_vport_functions;
+ extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
+ extern void qla2x00_init_host_attr(scsi_qla_host_t *);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 2674f4c..1e15020 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -301,12 +301,12 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
+  */
+ __inline__ void
+-qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
++qla2x00_start_timer(scsi_qla_host_t *vha, void (*func)(unsigned long), unsigned long interval)
+ {
+       init_timer(&vha->timer);
+       vha->timer.expires = jiffies + interval * HZ;
+       vha->timer.data = (unsigned long)vha;
+-      vha->timer.function = (void (*)(unsigned long))func;
++      vha->timer.function = func;
+       add_timer(&vha->timer);
+       vha->timer_active = 1;
+ }
+@@ -1510,8 +1510,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+                   !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+                       /* Ok, a 64bit DMA mask is applicable. */
+                       ha->flags.enable_64bit_addressing = 1;
+-                      ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+-                      ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++                      pax_open_kernel();
++                      const_cast(ha->isp_ops->calc_req_entries) = qla2x00_calc_iocbs_64;
++                      const_cast(ha->isp_ops->build_iocbs) = qla2x00_build_scsi_iocbs_64;
++                      pax_close_kernel();
+                       return;
+               }
+       }
+@@ -5381,8 +5383,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
+ * Context: Interrupt
+ ***************************************************************************/
+ void
+-qla2x00_timer(scsi_qla_host_t *vha)
++qla2x00_timer(unsigned long _vha)
+ {
++      scsi_qla_host_t *vha = (scsi_qla_host_t *)_vha;
+       unsigned long   cpu_flags = 0;
+       int             start_dpc = 0;
+       int             index;
+@@ -5644,7 +5647,7 @@ qla2x00_release_firmware(void)
+ }
+ static pci_ers_result_t
+-qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++qla2xxx_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+       struct qla_hw_data *ha = vha->hw;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index bff9689..8caa187 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -678,7 +678,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+       loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+       if (loop_id == 0xFFFF) {
+               /* Global event */
+-              atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
++              atomic_inc_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+               spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+               qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+@@ -845,8 +845,9 @@ static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+       sess->deleted = 0;
+ }
+-static void qlt_del_sess_work_fn(struct delayed_work *work)
++static void qlt_del_sess_work_fn(struct work_struct *_work)
+ {
++      struct delayed_work *work = container_of(_work, struct delayed_work, work);
+       struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+           sess_del_work);
+       struct scsi_qla_host *vha = tgt->vha;
+@@ -5825,7 +5826,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+ retry:
+       global_resets =
+-          atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
++          atomic_read_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+       rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+       if (rc != 0) {
+@@ -5864,12 +5865,12 @@ retry:
+       }
+       if (global_resets !=
+-          atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
++          atomic_read_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+                   "qla_target(%d): global reset during session discovery "
+                   "(counter was %d, new %d), retrying", vha->vp_idx,
+                   global_resets,
+-                  atomic_read(&vha->vha_tgt.
++                  atomic_read_unchecked(&vha->vha_tgt.
+                       qla_tgt->tgt_global_resets_count));
+               goto retry;
+       }
+@@ -6080,8 +6081,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+       init_waitqueue_head(&tgt->waitQ);
+       INIT_LIST_HEAD(&tgt->sess_list);
+       INIT_LIST_HEAD(&tgt->del_sess_list);
+-      INIT_DELAYED_WORK(&tgt->sess_del_work,
+-              (void (*)(struct work_struct *))qlt_del_sess_work_fn);
++      INIT_DELAYED_WORK(&tgt->sess_del_work, qlt_del_sess_work_fn);
+       spin_lock_init(&tgt->sess_work_lock);
+       INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+       INIT_LIST_HEAD(&tgt->sess_works_list);
+@@ -6089,7 +6089,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+       INIT_LIST_HEAD(&tgt->srr_ctio_list);
+       INIT_LIST_HEAD(&tgt->srr_imm_list);
+       INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+-      atomic_set(&tgt->tgt_global_resets_count, 0);
++      atomic_set_unchecked(&tgt->tgt_global_resets_count, 0);
+       base_vha->vha_tgt.qla_tgt = tgt;
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index f26c5f6..e88e9c5 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -876,7 +876,7 @@ struct qla_tgt {
+       struct list_head srr_imm_list;
+       struct work_struct srr_work;
+-      atomic_t tgt_global_resets_count;
++      atomic_unchecked_t tgt_global_resets_count;
+       struct list_head tgt_list_entry;
+ };
+diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
+index a7cfc27..151f483 100644
+--- a/drivers/scsi/qla4xxx/ql4_def.h
++++ b/drivers/scsi/qla4xxx/ql4_def.h
+@@ -306,7 +306,7 @@ struct ddb_entry {
+                                          * (4000 only) */
+       atomic_t relogin_timer;           /* Max Time to wait for
+                                          * relogin to complete */
+-      atomic_t relogin_retry_count;     /* Num of times relogin has been
++      atomic_unchecked_t relogin_retry_count;   /* Num of times relogin has been
+                                          * retried */
+       uint32_t default_time2wait;       /* Default Min time between
+                                          * relogins (+aens) */
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 01c3610..f287da9 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -3956,7 +3956,7 @@ exit_session_conn_param:
+  * Timer routines
+  */
+-static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
++static void qla4xxx_start_timer(struct scsi_qla_host *ha, void (*func)(unsigned long),
+                               unsigned long interval)
+ {
+       DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
+@@ -3964,7 +3964,7 @@ static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
+       init_timer(&ha->timer);
+       ha->timer.expires = jiffies + interval * HZ;
+       ha->timer.data = (unsigned long)ha;
+-      ha->timer.function = (void (*)(unsigned long))func;
++      ha->timer.function = func;
+       add_timer(&ha->timer);
+       ha->timer_active = 1;
+ }
+@@ -4490,12 +4490,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+                */
+               if (!iscsi_is_session_online(cls_sess)) {
+                       /* Reset retry relogin timer */
+-                      atomic_inc(&ddb_entry->relogin_retry_count);
++                      atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                               "%s: index[%d] relogin timed out-retrying"
+                               " relogin (%d), retry (%d)\n", __func__,
+                               ddb_entry->fw_ddb_index,
+-                              atomic_read(&ddb_entry->relogin_retry_count),
++                              atomic_read_unchecked(&ddb_entry->relogin_retry_count),
+                               ddb_entry->default_time2wait + 4));
+                       set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                       atomic_set(&ddb_entry->retry_relogin_timer,
+@@ -4508,8 +4508,9 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+  * qla4xxx_timer - checks every second for work to do.
+  * @ha: Pointer to host adapter structure.
+  **/
+-static void qla4xxx_timer(struct scsi_qla_host *ha)
++static void qla4xxx_timer(unsigned long _ha)
+ {
++      struct scsi_qla_host *ha = (struct scsi_qla_host *)_ha;
+       int start_dpc = 0;
+       uint16_t w;
+@@ -6603,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+       atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+       atomic_set(&ddb_entry->relogin_timer, 0);
+-      atomic_set(&ddb_entry->relogin_retry_count, 0);
++      atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
+       def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+       ddb_entry->default_relogin_timeout =
+               (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+@@ -9557,7 +9558,7 @@ exit_host_reset:
+  * RECOVERED - driver's pci_resume()
+  */
+ static pci_ers_result_t
+-qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++qla4xxx_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state state)
+ {
+       struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 1deb6ad..3057db5 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -591,7 +591,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
+       good_bytes = scsi_bufflen(cmd);
+         if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+-              int old_good_bytes = good_bytes;
++              unsigned int old_good_bytes = good_bytes;
+               drv = scsi_cmd_to_driver(cmd);
+               if (drv->done)
+                       good_bytes = drv->done(cmd);
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 6a219a0..fd669fd 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -289,10 +289,10 @@ struct sdebug_queue {
+       atomic_t blocked;       /* to temporarily stop more being queued */
+ };
+-static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
+-static atomic_t sdebug_completions;  /* count of deferred completions */
+-static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
+-static atomic_t sdebug_a_tsf;      /* 'almost task set full' counter */
++static atomic_unchecked_t sdebug_cmnd_count;   /* number of incoming commands */
++static atomic_unchecked_t sdebug_completions;  /* count of deferred completions */
++static atomic_unchecked_t sdebug_miss_cpus;    /* submission + completion cpus differ */
++static atomic_unchecked_t sdebug_a_tsf;            /* 'almost task set full' counter */
+ struct opcode_info_t {
+       u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
+@@ -3492,9 +3492,9 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
+       qc_idx = sd_dp->qc_idx;
+       sqp = sdebug_q_arr + sd_dp->sqa_idx;
+       if (sdebug_statistics) {
+-              atomic_inc(&sdebug_completions);
++              atomic_inc_unchecked(&sdebug_completions);
+               if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+-                      atomic_inc(&sdebug_miss_cpus);
++                      atomic_inc_unchecked(&sdebug_miss_cpus);
+       }
+       if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
+               pr_err("wild qc_idx=%d\n", qc_idx);
+@@ -3966,23 +3966,23 @@ static void tweak_cmnd_count(void)
+       if (modulo < 2)
+               return;
+       block_unblock_all_queues(true);
+-      count = atomic_read(&sdebug_cmnd_count);
+-      atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
++      count = atomic_read_unchecked(&sdebug_cmnd_count);
++      atomic_set_unchecked(&sdebug_cmnd_count, (count / modulo) * modulo);
+       block_unblock_all_queues(false);
+ }
+ static void clear_queue_stats(void)
+ {
+-      atomic_set(&sdebug_cmnd_count, 0);
+-      atomic_set(&sdebug_completions, 0);
+-      atomic_set(&sdebug_miss_cpus, 0);
+-      atomic_set(&sdebug_a_tsf, 0);
++      atomic_set_unchecked(&sdebug_cmnd_count, 0);
++      atomic_set_unchecked(&sdebug_completions, 0);
++      atomic_set_unchecked(&sdebug_miss_cpus, 0);
++      atomic_set_unchecked(&sdebug_a_tsf, 0);
+ }
+ static void setup_inject(struct sdebug_queue *sqp,
+                        struct sdebug_queued_cmd *sqcp)
+ {
+-      if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
++      if ((atomic_read_unchecked(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
+               return;
+       sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
+       sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
+@@ -4039,9 +4039,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+                           (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
+                           (scsi_result == 0))) {
+               if ((num_in_q == (qdepth - 1)) &&
+-                  (atomic_inc_return(&sdebug_a_tsf) >=
++                  (atomic_inc_return_unchecked(&sdebug_a_tsf) >=
+                    abs(sdebug_every_nth))) {
+-                      atomic_set(&sdebug_a_tsf, 0);
++                      atomic_set_unchecked(&sdebug_a_tsf, 0);
+                       inject = 1;
+                       scsi_result = device_qfull_result;
+               }
+@@ -4296,10 +4296,10 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
+                  TICK_NSEC / 1000, "statistics", sdebug_statistics,
+                  sdebug_mq_active);
+       seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+-                 atomic_read(&sdebug_cmnd_count),
+-                 atomic_read(&sdebug_completions),
+-                 "miss_cpus", atomic_read(&sdebug_miss_cpus),
+-                 atomic_read(&sdebug_a_tsf));
++                 atomic_read_unchecked(&sdebug_cmnd_count),
++                 atomic_read_unchecked(&sdebug_completions),
++                 "miss_cpus", atomic_read_unchecked(&sdebug_miss_cpus),
++                 atomic_read_unchecked(&sdebug_a_tsf));
+       seq_printf(m, "submit_queues=%d\n", submit_queues);
+       for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+@@ -5252,7 +5252,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
+ static bool fake_timeout(struct scsi_cmnd *scp)
+ {
+-      if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
++      if (0 == (atomic_read_unchecked(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
+               if (sdebug_every_nth < -1)
+                       sdebug_every_nth = -1;
+               if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
+@@ -5283,7 +5283,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
+       scsi_set_resid(scp, 0);
+       if (sdebug_statistics)
+-              atomic_inc(&sdebug_cmnd_count);
++              atomic_inc_unchecked(&sdebug_cmnd_count);
+       if (unlikely(sdebug_verbose &&
+                    !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
+               char b[120];
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index c71344a..94f1f9e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1513,7 +1513,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
+       shost = sdev->host;
+       scsi_init_cmd_errh(cmd);
+       cmd->result = DID_NO_CONNECT << 16;
+-      atomic_inc(&cmd->device->iorequest_cnt);
++      atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+       /*
+        * SCSI request completion path will do scsi_device_unbusy(),
+@@ -1536,9 +1536,9 @@ static void scsi_softirq_done(struct request *rq)
+       INIT_LIST_HEAD(&cmd->eh_entry);
+-      atomic_inc(&cmd->device->iodone_cnt);
++      atomic_inc_unchecked(&cmd->device->iodone_cnt);
+       if (cmd->result)
+-              atomic_inc(&cmd->device->ioerr_cnt);
++              atomic_inc_unchecked(&cmd->device->ioerr_cnt);
+       disposition = scsi_decide_disposition(cmd);
+       if (disposition != SUCCESS &&
+@@ -1579,7 +1579,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+       struct Scsi_Host *host = cmd->device->host;
+       int rtn = 0;
+-      atomic_inc(&cmd->device->iorequest_cnt);
++      atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+       /* check if the device is still usable */
+       if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 0734927..427833a 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -848,7 +848,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr,     \
+                   char *buf)                                          \
+ {                                                                     \
+       struct scsi_device *sdev = to_scsi_device(dev);                 \
+-      unsigned long long count = atomic_read(&sdev->field);           \
++      unsigned long long count = atomic_read_unchecked(&sdev->field); \
+       return snprintf(buf, 20, "0x%llx\n", count);                    \
+ }                                                                     \
+ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
+diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
+index 0f3a386..1616cee 100644
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -502,7 +502,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
+  * Netlink Infrastructure
+  */
+-static atomic_t fc_event_seq;
++static atomic_unchecked_t fc_event_seq;
+ /**
+  * fc_get_event_number - Obtain the next sequential FC event number
+@@ -515,7 +515,7 @@ static atomic_t fc_event_seq;
+ u32
+ fc_get_event_number(void)
+ {
+-      return atomic_add_return(1, &fc_event_seq);
++      return atomic_add_return_unchecked(1, &fc_event_seq);
+ }
+ EXPORT_SYMBOL(fc_get_event_number);
+@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
+ {
+       int error;
+-      atomic_set(&fc_event_seq, 0);
++      atomic_set_unchecked(&fc_event_seq, 0);
+       error = transport_class_register(&fc_host_class);
+       if (error)
+@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
+       char *cp;
+       *val = simple_strtoul(buf, &cp, 0);
+-      if ((*cp && (*cp != '\n')) || (*val < 0))
++      if (*cp && (*cp != '\n'))
+               return -EINVAL;
+       /*
+        * Check for overflow; dev_loss_tmo is u32
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 42bca61..ceceb5d 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -79,7 +79,7 @@ struct iscsi_internal {
+       struct transport_container session_cont;
+ };
+-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+ static DEFINE_IDA(iscsi_sess_ida);
+@@ -2073,7 +2073,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+       int id = 0;
+       int err;
+-      session->sid = atomic_add_return(1, &iscsi_session_nr);
++      session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
+       if (target_id == ISCSI_MAX_TARGET) {
+               id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+@@ -4523,7 +4523,7 @@ static __init int iscsi_transport_init(void)
+       printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
+               ISCSI_TRANSPORT_VERSION);
+-      atomic_set(&iscsi_session_nr, 0);
++      atomic_set_unchecked(&iscsi_session_nr, 0);
+       err = class_register(&iscsi_transport_class);
+       if (err)
+diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
+index 319868f..a00cda5 100644
+--- a/drivers/scsi/scsi_transport_spi.c
++++ b/drivers/scsi/scsi_transport_spi.c
+@@ -758,7 +758,7 @@ spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
+ static enum spi_compare_returns
+ spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
+              enum spi_compare_returns 
+-             (*compare_fn)(struct scsi_device *, u8 *, u8 *, int))
++             (*compare_fn)(struct scsi_device *, u8 *, u8 *, const int))
+ {
+       struct spi_internal *i = to_spi_internal(sdev->host->transportt);
+       struct scsi_target *starget = sdev->sdev_target;
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index e3cd3ec..97ab643 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -35,7 +35,7 @@
+ #include "scsi_priv.h"
+ struct srp_host_attrs {
+-      atomic_t next_port_id;
++      atomic_unchecked_t next_port_id;
+ };
+ #define to_srp_host_attrs(host)       ((struct srp_host_attrs *)(host)->shost_data)
+@@ -105,7 +105,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
+       struct Scsi_Host *shost = dev_to_shost(dev);
+       struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
+-      atomic_set(&srp_host->next_port_id, 0);
++      atomic_set_unchecked(&srp_host->next_port_id, 0);
+       return 0;
+ }
+@@ -226,7 +226,7 @@ static ssize_t show_reconnect_delay(struct device *dev,
+ static ssize_t store_reconnect_delay(struct device *dev,
+                                    struct device_attribute *attr,
+-                                   const char *buf, const size_t count)
++                                   const char *buf, size_t count)
+ {
+       struct srp_rport *rport = transport_class_to_srp_rport(dev);
+       int res, delay;
+@@ -752,7 +752,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
+                         rport_fast_io_fail_timedout);
+       INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
+-      id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
++      id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
+       dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
+       transport_setup_device(&rport->dev);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index d3e852a..5a04bed 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -112,7 +112,7 @@ static int sd_resume(struct device *);
+ static void sd_rescan(struct device *);
+ static int sd_init_command(struct scsi_cmnd *SCpnt);
+ static void sd_uninit_command(struct scsi_cmnd *SCpnt);
+-static int sd_done(struct scsi_cmnd *);
++static unsigned int sd_done(struct scsi_cmnd *);
+ static int sd_eh_action(struct scsi_cmnd *, int);
+ static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
+ static void scsi_disk_release(struct device *cdev);
+@@ -1767,7 +1767,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+  *
+  *    Note: potentially run from within an ISR. Must not block.
+  **/
+-static int sd_done(struct scsi_cmnd *SCpnt)
++static unsigned int sd_done(struct scsi_cmnd *SCpnt)
+ {
+       int result = SCpnt->result;
+       unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+@@ -3080,7 +3080,7 @@ static int sd_probe(struct device *dev)
+       sdkp->disk = gd;
+       sdkp->index = index;
+       atomic_set(&sdkp->openers, 0);
+-      atomic_set(&sdkp->device->ioerr_cnt, 0);
++      atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
+       if (!sdp->request_queue->rq_timeout) {
+               if (sdp->type != TYPE_MOD)
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index ae7d9bd..77e1f04 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1090,7 +1090,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+                                      sdp->disk->disk_name,
+                                      MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
+                                      NULL,
+-                                     (char *)arg);
++                                     (char __user *)arg);
+       case BLKTRACESTART:
+               return blk_trace_startstop(sdp->device->request_queue, 1);
+       case BLKTRACESTOP:
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index ed17934..108678b 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -80,7 +80,7 @@ static DEFINE_MUTEX(sr_mutex);
+ static int sr_probe(struct device *);
+ static int sr_remove(struct device *);
+ static int sr_init_command(struct scsi_cmnd *SCpnt);
+-static int sr_done(struct scsi_cmnd *);
++static unsigned int sr_done(struct scsi_cmnd *);
+ static int sr_runtime_suspend(struct device *dev);
+ static struct dev_pm_ops sr_pm_ops = {
+@@ -315,13 +315,13 @@ do_tur:
+  * It will be notified on the end of a SCSI read / write, and will take one
+  * of several actions based on success or failure.
+  */
+-static int sr_done(struct scsi_cmnd *SCpnt)
++static unsigned int sr_done(struct scsi_cmnd *SCpnt)
+ {
+       int result = SCpnt->result;
+-      int this_count = scsi_bufflen(SCpnt);
+-      int good_bytes = (result == 0 ? this_count : 0);
+-      int block_sectors = 0;
+-      long error_sector;
++      unsigned int this_count = scsi_bufflen(SCpnt);
++      unsigned int good_bytes = (result == 0 ? this_count : 0);
++      unsigned int block_sectors = 0;
++      sector_t error_sector;
+       struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+ #ifdef DEBUG
+@@ -354,9 +354,12 @@ static int sr_done(struct scsi_cmnd *SCpnt)
+                       if (cd->device->sector_size == 2048)
+                               error_sector <<= 2;
+                       error_sector &= ~(block_sectors - 1);
+-                      good_bytes = (error_sector -
+-                                    blk_rq_pos(SCpnt->request)) << 9;
+-                      if (good_bytes < 0 || good_bytes >= this_count)
++                      if (error_sector >= blk_rq_pos(SCpnt->request)) {
++                              good_bytes = (error_sector -
++                                            blk_rq_pos(SCpnt->request)) << 9;
++                              if (good_bytes >= this_count)
++                                      good_bytes = 0;
++                      } else
+                               good_bytes = 0;
+                       /*
+                        * The SCSI specification allows for the value
+diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
+index de2c1bf..60b8563 100644
+--- a/drivers/soc/tegra/fuse/fuse-tegra.c
++++ b/drivers/soc/tegra/fuse/fuse-tegra.c
+@@ -72,7 +72,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
+       return i;
+ }
+-static struct bin_attribute fuse_bin_attr = {
++static bin_attribute_no_const fuse_bin_attr = {
+       .attr = { .name = "fuse", .mode = S_IRUGO, },
+       .read = fuse_read,
+ };
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 200ca22..170ab80 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2982,7 +2982,7 @@ int spi_bus_unlock(struct spi_master *master)
+ EXPORT_SYMBOL_GPL(spi_bus_unlock);
+ /* portable code must never pass more than 32 bytes */
+-#define       SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
++#define       SPI_BUFSIZ      max(32UL, SMP_CACHE_BYTES)
+ static u8     *buf;
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index 4c281df..1960930 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -649,7 +649,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+ {
+       struct fb_info *info;
+       struct fbtft_par *par;
+-      struct fb_ops *fbops = NULL;
++      fb_ops_no_const *fbops = NULL;
+       struct fb_deferred_io *fbdefio = NULL;
+       u8 *vmem = NULL;
+       void *txbuf = NULL;
+diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
+index d3bc394..7fa336d 100644
+--- a/drivers/staging/fbtft/fbtft.h
++++ b/drivers/staging/fbtft/fbtft.h
+@@ -93,7 +93,7 @@ struct fbtft_ops {
+       int (*set_var)(struct fbtft_par *par);
+       int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
+-};
++} __no_const;
+ /**
+  * struct fbtft_display - Describes the display properties
+diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
+index bb55219..789b758 100644
+--- a/drivers/staging/gdm724x/gdm_lte.c
++++ b/drivers/staging/gdm724x/gdm_lte.c
+@@ -410,7 +410,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
+       return nic_type;
+ }
+-static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct nic *nic = netdev_priv(dev);
+       u32 nic_type;
+diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
+index eb7e252..b7bd5e5 100644
+--- a/drivers/staging/gdm724x/gdm_tty.c
++++ b/drivers/staging/gdm724x/gdm_tty.c
+@@ -44,7 +44,7 @@
+ #define gdm_tty_send_control(n, r, v, d, l) (\
+       n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
+-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
++#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
+ static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
+ static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
+diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c
+index 46d957c..d590c95 100644
+--- a/drivers/staging/i4l/icn/icn.c
++++ b/drivers/staging/i4l/icn/icn.c
+@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
+               if (count > len)
+                       count = len;
+               if (user) {
+-                      if (copy_from_user(msg, buf, count))
++                      if (count > sizeof msg || copy_from_user(msg, buf, count))
+                               return -EFAULT;
+               } else
+                       memcpy(msg, buf, count);
+diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
+index 2177f1d..f226336 100644
+--- a/drivers/staging/iio/adc/ad7280a.c
++++ b/drivers/staging/iio/adc/ad7280a.c
+@@ -547,8 +547,8 @@ static int ad7280_attr_init(struct ad7280_state *st)
+ {
+       int dev, ch, cnt;
+-      st->iio_attr = kcalloc(2, sizeof(*st->iio_attr) *
+-                             (st->slave_num + 1) * AD7280A_CELLS_PER_DEV,
++      st->iio_attr = kcalloc(sizeof(*st->iio_attr) *
++                             (st->slave_num + 1) * AD7280A_CELLS_PER_DEV, 2,
+                              GFP_KERNEL);
+       if (!st->iio_attr)
+               return -ENOMEM;
+diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
+index 1e21eb1..d3f9dd7 100644
+--- a/drivers/staging/ks7010/ks_wlan_net.c
++++ b/drivers/staging/ks7010/ks_wlan_net.c
+@@ -181,9 +181,10 @@ int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get protocol name */
+ static int ks_wlan_get_name(struct net_device *dev,
+-                          struct iw_request_info *info, char *cwrq,
++                          struct iw_request_info *info, union iwreq_data *_cwrq,
+                           char *extra)
+ {
++      char *cwrq = _cwrq->name;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -207,9 +208,10 @@ static int ks_wlan_get_name(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set frequency */
+ static int ks_wlan_set_freq(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_freq *fwrq,
++                          struct iw_request_info *info, union iwreq_data *_fwrq,
+                           char *extra)
+ {
++      struct iw_freq *fwrq = &_fwrq->freq;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int rc = -EINPROGRESS;  /* Call commit handler */
+@@ -255,9 +257,10 @@ static int ks_wlan_set_freq(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get frequency */
+ static int ks_wlan_get_freq(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_freq *fwrq,
++                          struct iw_request_info *info, union iwreq_data *_fwrq,
+                           char *extra)
+ {
++      struct iw_freq *fwrq = &_fwrq->freq;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int f;
+@@ -280,8 +283,9 @@ static int ks_wlan_get_freq(struct net_device *dev,
+ /* Wireless Handler : set ESSID */
+ static int ks_wlan_set_essid(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_point *dwrq, char *extra)
++                           union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->essid;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       size_t len;
+@@ -340,8 +344,9 @@ static int ks_wlan_set_essid(struct net_device *dev,
+ /* Wireless Handler : get ESSID */
+ static int ks_wlan_get_essid(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_point *dwrq, char *extra)
++                           union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->essid;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -383,8 +388,9 @@ static int ks_wlan_get_essid(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set AP address */
+ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
+-                         struct sockaddr *ap_addr, char *extra)
++                         union iwreq_data *_ap_addr, char *extra)
+ {
++      struct sockaddr *ap_addr = &_ap_addr->ap_addr;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -421,8 +427,9 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get AP address */
+ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
+-                         struct sockaddr *awrq, char *extra)
++                         union iwreq_data *_awrq, char *extra)
+ {
++      struct sockaddr *awrq = &_awrq->ap_addr;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -444,9 +451,10 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set Nickname */
+ static int ks_wlan_set_nick(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_point *dwrq,
++                          struct iw_request_info *info, union iwreq_data *_dwrq,
+                           char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -468,9 +476,10 @@ static int ks_wlan_set_nick(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get Nickname */
+ static int ks_wlan_get_nick(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_point *dwrq,
++                          struct iw_request_info *info, union iwreq_data *_dwrq,
+                           char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -488,9 +497,10 @@ static int ks_wlan_get_nick(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set Bit-Rate */
+ static int ks_wlan_set_rate(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_param *vwrq,
++                          struct iw_request_info *info, union iwreq_data *_vwrq,
+                           char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->bitrate;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int i = 0;
+@@ -723,9 +733,10 @@ static int ks_wlan_set_rate(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get Bit-Rate */
+ static int ks_wlan_get_rate(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_param *vwrq,
++                          struct iw_request_info *info, union iwreq_data *_vwrq,
+                           char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->bitrate;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -751,8 +762,9 @@ static int ks_wlan_get_rate(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set RTS threshold */
+ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
+-                         struct iw_param *vwrq, char *extra)
++                         union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->rts;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int rthr = vwrq->value;
+@@ -775,8 +787,9 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get RTS threshold */
+ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
+-                         struct iw_param *vwrq, char *extra)
++                         union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->rts;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -794,9 +807,10 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set Fragmentation threshold */
+ static int ks_wlan_set_frag(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_param *vwrq,
++                          struct iw_request_info *info, union iwreq_data *_vwrq,
+                           char *extra)
+ {
++      struct iw_param *vwrq =&_vwrq->frag;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int fthr = vwrq->value;
+@@ -820,9 +834,10 @@ static int ks_wlan_set_frag(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get Fragmentation threshold */
+ static int ks_wlan_get_frag(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_param *vwrq,
++                          struct iw_request_info *info, union iwreq_data *_vwrq,
+                           char *extra)
+ {
++      struct iw_param *vwrq =&_vwrq->frag;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -840,9 +855,10 @@ static int ks_wlan_get_frag(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set Mode of Operation */
+ static int ks_wlan_set_mode(struct net_device *dev,
+-                          struct iw_request_info *info, __u32 * uwrq,
++                          struct iw_request_info *info, union iwreq_data *_uwrq,
+                           char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -876,9 +892,10 @@ static int ks_wlan_set_mode(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get Mode of Operation */
+ static int ks_wlan_get_mode(struct net_device *dev,
+-                          struct iw_request_info *info, __u32 * uwrq,
++                          struct iw_request_info *info, union iwreq_data *_uwrq,
+                           char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -906,8 +923,9 @@ static int ks_wlan_get_mode(struct net_device *dev,
+ /* Wireless Handler : set Encryption Key */
+ static int ks_wlan_set_encode(struct net_device *dev,
+                             struct iw_request_info *info,
+-                            struct iw_point *dwrq, char *extra)
++                            union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->encoding;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -1024,8 +1042,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
+ /* Wireless Handler : get Encryption Key */
+ static int ks_wlan_get_encode(struct net_device *dev,
+                             struct iw_request_info *info,
+-                            struct iw_point *dwrq, char *extra)
++                            union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->encoding;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       char zeros[16];
+@@ -1080,7 +1099,7 @@ static int ks_wlan_get_encode(struct net_device *dev,
+ /* Wireless Handler : set Tx-Power */
+ static int ks_wlan_set_txpow(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_param *vwrq, char *extra)
++                           union iwreq_data *vwrq, char *extra)
+ {
+       return -EOPNOTSUPP;     /* Not Support */
+ }
+@@ -1089,8 +1108,10 @@ static int ks_wlan_set_txpow(struct net_device *dev,
+ /* Wireless Handler : get Tx-Power */
+ static int ks_wlan_get_txpow(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_param *vwrq, char *extra)
++                           union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->txpower;
++
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+@@ -1107,7 +1128,7 @@ static int ks_wlan_get_txpow(struct net_device *dev,
+ /* Wireless Handler : set Retry limits */
+ static int ks_wlan_set_retry(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_param *vwrq, char *extra)
++                           union iwreq_data *vwrq, char *extra)
+ {
+       return -EOPNOTSUPP;     /* Not Support */
+ }
+@@ -1116,8 +1137,10 @@ static int ks_wlan_set_retry(struct net_device *dev,
+ /* Wireless Handler : get Retry limits */
+ static int ks_wlan_get_retry(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_param *vwrq, char *extra)
++                           union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq =&_vwrq->retry;
++
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+@@ -1135,8 +1158,9 @@ static int ks_wlan_get_retry(struct net_device *dev,
+ /* Wireless Handler : get range info */
+ static int ks_wlan_get_range(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_point *dwrq, char *extra)
++                           union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_range *range = (struct iw_range *)extra;
+@@ -1266,8 +1290,9 @@ static int ks_wlan_get_range(struct net_device *dev,
+ /* Wireless Handler : set Power Management */
+ static int ks_wlan_set_power(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_param *vwrq, char *extra)
++                           union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq =&_vwrq->power;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       short enabled;
+@@ -1301,8 +1326,9 @@ static int ks_wlan_set_power(struct net_device *dev,
+ /* Wireless Handler : get Power Management */
+ static int ks_wlan_get_power(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_param *vwrq, char *extra)
++                           union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq =&_vwrq->power;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -1322,8 +1348,9 @@ static int ks_wlan_get_power(struct net_device *dev,
+ /* Wireless Handler : get wirless statistics */
+ static int ks_wlan_get_iwstats(struct net_device *dev,
+                              struct iw_request_info *info,
+-                             struct iw_quality *vwrq, char *extra)
++                             union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_quality *vwrq = &_vwrq->qual;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -1343,7 +1370,7 @@ static int ks_wlan_get_iwstats(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : set Sensitivity */
+ static int ks_wlan_set_sens(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_param *vwrq,
++                          struct iw_request_info *info, union iwreq_data *vwrq,
+                           char *extra)
+ {
+       return -EOPNOTSUPP;     /* Not Support */
+@@ -1352,9 +1379,11 @@ static int ks_wlan_set_sens(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : get Sensitivity */
+ static int ks_wlan_get_sens(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_param *vwrq,
++                          struct iw_request_info *info, union iwreq_data *_vwrq,
+                           char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->sens;
++
+       /* Not Support */
+       vwrq->value = 0;
+       vwrq->disabled = (vwrq->value == 0);
+@@ -1368,8 +1397,9 @@ static int ks_wlan_get_sens(struct net_device *dev,
+ /* Note : this is deprecated in favor of IWSCAN */
+ static int ks_wlan_get_aplist(struct net_device *dev,
+                             struct iw_request_info *info,
+-                            struct iw_point *dwrq, char *extra)
++                            union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct sockaddr *address = (struct sockaddr *)extra;
+@@ -1596,9 +1626,10 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Wireless Handler : Read Scan Results */
+ static int ks_wlan_get_scan(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_point *dwrq,
++                          struct iw_request_info *info, union iwreq_data *_dwrq,
+                           char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int i;
+@@ -1655,7 +1686,7 @@ static int ks_wlan_get_scan(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Commit handler : called after a bunch of SET operations */
+ static int ks_wlan_config_commit(struct net_device *dev,
+-                               struct iw_request_info *info, void *zwrq,
++                               struct iw_request_info *info, union iwreq_data *zwrq,
+                                char *extra)
+ {
+       struct ks_wlan_private *priv =
+@@ -1673,8 +1704,9 @@ static int ks_wlan_config_commit(struct net_device *dev,
+ /* Wireless handler : set association ie params */
+ static int ks_wlan_set_genie(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_point *dwrq, char *extra)
++                           union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq =&_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -1692,8 +1724,9 @@ static int ks_wlan_set_genie(struct net_device *dev,
+ /* Wireless handler : set authentication mode params */
+ static int ks_wlan_set_auth_mode(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *vwrq, char *extra)
++                               union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->param;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int index = (vwrq->flags & IW_AUTH_INDEX);
+@@ -1832,8 +1865,9 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
+ /* Wireless handler : get authentication mode params */
+ static int ks_wlan_get_auth_mode(struct net_device *dev,
+                                struct iw_request_info *info,
+-                               struct iw_param *vwrq, char *extra)
++                               union iwreq_data *_vwrq, char *extra)
+ {
++      struct iw_param *vwrq = &_vwrq->param;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int index = (vwrq->flags & IW_AUTH_INDEX);
+@@ -1878,8 +1912,9 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
+ /* Wireless Handler : set encoding token & mode (WPA)*/
+ static int ks_wlan_set_encode_ext(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *dwrq, char *extra)
++                                union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->encoding;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_encode_ext *enc;
+@@ -1986,8 +2021,9 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
+ /* Wireless Handler : get encoding token & mode (WPA)*/
+ static int ks_wlan_get_encode_ext(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                struct iw_point *dwrq, char *extra)
++                                union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->encoding;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2009,8 +2045,9 @@ static int ks_wlan_get_encode_ext(struct net_device *dev,
+ /* Wireless Handler : PMKSA cache operation (WPA2) */
+ static int ks_wlan_set_pmksa(struct net_device *dev,
+                            struct iw_request_info *info,
+-                           struct iw_point *dwrq, char *extra)
++                           union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_pmksa *pmksa;
+@@ -2168,9 +2205,10 @@ static int ks_wlan_set_stop_request(struct net_device *dev,
+ /* Wireless Handler : set MLME */
+ #include <linux/ieee80211.h>
+ static int ks_wlan_set_mlme(struct net_device *dev,
+-                          struct iw_request_info *info, struct iw_point *dwrq,
++                          struct iw_request_info *info, union iwreq_data *_dwrq,
+                           char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_mlme *mlme = (struct iw_mlme *)extra;
+@@ -2199,8 +2237,9 @@ static int ks_wlan_set_mlme(struct net_device *dev,
+ /* Private handler : get firemware version */
+ static int ks_wlan_get_firmware_version(struct net_device *dev,
+                                       struct iw_request_info *info,
+-                                      struct iw_point *dwrq, char *extra)
++                                      union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       strcpy(extra, &(priv->firmware_version[0]));
+@@ -2270,9 +2309,10 @@ static int ks_wlan_get_connect(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set preamble */
+ static int ks_wlan_set_preamble(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2295,9 +2335,10 @@ static int ks_wlan_set_preamble(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get preamble */
+ static int ks_wlan_get_preamble(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2312,9 +2353,10 @@ static int ks_wlan_get_preamble(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set power save mode */
+ static int ks_wlan_set_powermgt(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2345,9 +2387,10 @@ static int ks_wlan_set_powermgt(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get power save made */
+ static int ks_wlan_get_powermgt(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2362,9 +2405,10 @@ static int ks_wlan_get_powermgt(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set scan type */
+ static int ks_wlan_set_scan_type(struct net_device *dev,
+-                               struct iw_request_info *info, __u32 * uwrq,
++                               struct iw_request_info *info, union iwreq_data *_uwrq,
+                                char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2385,9 +2429,10 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get scan type */
+ static int ks_wlan_get_scan_type(struct net_device *dev,
+-                               struct iw_request_info *info, __u32 * uwrq,
++                               struct iw_request_info *info, union iwreq_data *_uwrq,
+                                char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2536,9 +2581,10 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set beacon lost count */
+ static int ks_wlan_set_beacon_lost(struct net_device *dev,
+-                                 struct iw_request_info *info, __u32 * uwrq,
++                                 struct iw_request_info *info, union iwreq_data *_uwrq,
+                                  char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2561,9 +2607,10 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get beacon lost count */
+ static int ks_wlan_get_beacon_lost(struct net_device *dev,
+-                                 struct iw_request_info *info, __u32 * uwrq,
++                                 struct iw_request_info *info, union iwreq_data *_uwrq,
+                                  char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2578,9 +2625,10 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set phy type */
+ static int ks_wlan_set_phy_type(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2604,9 +2652,10 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get phy type */
+ static int ks_wlan_get_phy_type(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2621,9 +2670,10 @@ static int ks_wlan_get_phy_type(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set cts mode */
+ static int ks_wlan_set_cts_mode(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2649,9 +2699,10 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get cts mode */
+ static int ks_wlan_get_cts_mode(struct net_device *dev,
+-                              struct iw_request_info *info, __u32 * uwrq,
++                              struct iw_request_info *info, union iwreq_data *_uwrq,
+                               char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2667,8 +2718,9 @@ static int ks_wlan_get_cts_mode(struct net_device *dev,
+ /* Private handler : set sleep mode */
+ static int ks_wlan_set_sleep_mode(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                __u32 * uwrq, char *extra)
++                                union iwreq_data *_uwrq, char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2697,8 +2749,9 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
+ /* Private handler : get sleep mode */
+ static int ks_wlan_get_sleep_mode(struct net_device *dev,
+                                 struct iw_request_info *info,
+-                                __u32 * uwrq, char *extra)
++                                union iwreq_data *_uwrq, char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2752,9 +2805,10 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set WPS enable */
+ static int ks_wlan_set_wps_enable(struct net_device *dev,
+-                                struct iw_request_info *info, __u32 * uwrq,
++                                struct iw_request_info *info, union iwreq_data *_uwrq,
+                                 char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       DPRINTK(2, "\n");
+@@ -2776,9 +2830,10 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get WPS enable */
+ static int ks_wlan_get_wps_enable(struct net_device *dev,
+-                                struct iw_request_info *info, __u32 * uwrq,
++                                struct iw_request_info *info, union iwreq_data *_uwrq,
+                                 char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       DPRINTK(2, "\n");
+@@ -2797,8 +2852,9 @@ static int ks_wlan_get_wps_enable(struct net_device *dev,
+ /* Private handler : set WPS probe req */
+ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
+                                    struct iw_request_info *info,
+-                                   struct iw_point *dwrq, char *extra)
++                                   union iwreq_data *_dwrq, char *extra)
+ {
++      struct iw_point *dwrq = &_dwrq->data;
+       uint8_t *p = extra;
+       unsigned char len;
+       struct ks_wlan_private *priv =
+@@ -2855,9 +2911,10 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set tx gain control value */
+ static int ks_wlan_set_tx_gain(struct net_device *dev,
+-                             struct iw_request_info *info, __u32 * uwrq,
++                             struct iw_request_info *info, union iwreq_data *_uwrq,
+                              char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2882,9 +2939,10 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get tx gain control value */
+ static int ks_wlan_get_tx_gain(struct net_device *dev,
+-                             struct iw_request_info *info, __u32 * uwrq,
++                             struct iw_request_info *info, union iwreq_data *_uwrq,
+                              char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2900,9 +2958,10 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : set rx gain control value */
+ static int ks_wlan_set_rx_gain(struct net_device *dev,
+-                             struct iw_request_info *info, __u32 * uwrq,
++                             struct iw_request_info *info, union iwreq_data *_uwrq,
+                              char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2927,9 +2986,10 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get rx gain control value */
+ static int ks_wlan_get_rx_gain(struct net_device *dev,
+-                             struct iw_request_info *info, __u32 * uwrq,
++                             struct iw_request_info *info, union iwreq_data *_uwrq,
+                              char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -2968,9 +3028,10 @@ static int ks_wlan_set_region(struct net_device *dev,
+ /*------------------------------------------------------------------*/
+ /* Private handler : get eeprom checksum result */
+ static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
+-                                  struct iw_request_info *info, __u32 * uwrq,
++                                  struct iw_request_info *info, union iwreq_data *_uwrq,
+                                   char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -3095,8 +3156,9 @@ static void print_hif_event(int event)
+ /*------------------------------------------------------------------*/
+ /* Private handler : get host command history */
+ static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
+-                       __u32 * uwrq, char *extra)
++                       union iwreq_data *_uwrq, char *extra)
+ {
++      __u32 *uwrq = &_uwrq->mode;
+       int i, event;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+@@ -3167,119 +3229,119 @@ static const struct iw_priv_args ks_wlan_private_args[] = {
+ };
+ static const iw_handler ks_wlan_handler[] = {
+-      (iw_handler) ks_wlan_config_commit,     /* SIOCSIWCOMMIT */
+-      (iw_handler) ks_wlan_get_name,  /* SIOCGIWNAME */
+-      (iw_handler) NULL,      /* SIOCSIWNWID */
+-      (iw_handler) NULL,      /* SIOCGIWNWID */
+-      (iw_handler) ks_wlan_set_freq,  /* SIOCSIWFREQ */
+-      (iw_handler) ks_wlan_get_freq,  /* SIOCGIWFREQ */
+-      (iw_handler) ks_wlan_set_mode,  /* SIOCSIWMODE */
+-      (iw_handler) ks_wlan_get_mode,  /* SIOCGIWMODE */
++      ks_wlan_config_commit,  /* SIOCSIWCOMMIT */
++      ks_wlan_get_name,       /* SIOCGIWNAME */
++      NULL,   /* SIOCSIWNWID */
++      NULL,   /* SIOCGIWNWID */
++      ks_wlan_set_freq,       /* SIOCSIWFREQ */
++      ks_wlan_get_freq,       /* SIOCGIWFREQ */
++      ks_wlan_set_mode,       /* SIOCSIWMODE */
++      ks_wlan_get_mode,       /* SIOCGIWMODE */
+ #ifndef KSC_OPNOTSUPP
+-      (iw_handler) ks_wlan_set_sens,  /* SIOCSIWSENS */
+-      (iw_handler) ks_wlan_get_sens,  /* SIOCGIWSENS */
++      ks_wlan_set_sens,       /* SIOCSIWSENS */
++      ks_wlan_get_sens,       /* SIOCGIWSENS */
+ #else /* KSC_OPNOTSUPP */
+-      (iw_handler) NULL,      /* SIOCSIWSENS */
+-      (iw_handler) NULL,      /* SIOCGIWSENS */
++      NULL,   /* SIOCSIWSENS */
++      NULL,   /* SIOCGIWSENS */
+ #endif /* KSC_OPNOTSUPP */
+-      (iw_handler) NULL,      /* SIOCSIWRANGE */
+-      (iw_handler) ks_wlan_get_range, /* SIOCGIWRANGE */
+-      (iw_handler) NULL,      /* SIOCSIWPRIV */
+-      (iw_handler) NULL,      /* SIOCGIWPRIV */
+-      (iw_handler) NULL,      /* SIOCSIWSTATS */
+-      (iw_handler) ks_wlan_get_iwstats,       /* SIOCGIWSTATS */
+-      (iw_handler) NULL,      /* SIOCSIWSPY */
+-      (iw_handler) NULL,      /* SIOCGIWSPY */
+-      (iw_handler) NULL,      /* SIOCSIWTHRSPY */
+-      (iw_handler) NULL,      /* SIOCGIWTHRSPY */
+-      (iw_handler) ks_wlan_set_wap,   /* SIOCSIWAP */
+-      (iw_handler) ks_wlan_get_wap,   /* SIOCGIWAP */
+-//      (iw_handler) NULL,                      /* SIOCSIWMLME */
+-      (iw_handler) ks_wlan_set_mlme,  /* SIOCSIWMLME */
+-      (iw_handler) ks_wlan_get_aplist,        /* SIOCGIWAPLIST */
+-      (iw_handler) ks_wlan_set_scan,  /* SIOCSIWSCAN */
+-      (iw_handler) ks_wlan_get_scan,  /* SIOCGIWSCAN */
+-      (iw_handler) ks_wlan_set_essid, /* SIOCSIWESSID */
+-      (iw_handler) ks_wlan_get_essid, /* SIOCGIWESSID */
+-      (iw_handler) ks_wlan_set_nick,  /* SIOCSIWNICKN */
+-      (iw_handler) ks_wlan_get_nick,  /* SIOCGIWNICKN */
+-      (iw_handler) NULL,      /* -- hole -- */
+-      (iw_handler) NULL,      /* -- hole -- */
+-      (iw_handler) ks_wlan_set_rate,  /* SIOCSIWRATE */
+-      (iw_handler) ks_wlan_get_rate,  /* SIOCGIWRATE */
+-      (iw_handler) ks_wlan_set_rts,   /* SIOCSIWRTS */
+-      (iw_handler) ks_wlan_get_rts,   /* SIOCGIWRTS */
+-      (iw_handler) ks_wlan_set_frag,  /* SIOCSIWFRAG */
+-      (iw_handler) ks_wlan_get_frag,  /* SIOCGIWFRAG */
++      NULL,   /* SIOCSIWRANGE */
++      ks_wlan_get_range,      /* SIOCGIWRANGE */
++      NULL,   /* SIOCSIWPRIV */
++      NULL,   /* SIOCGIWPRIV */
++      NULL,   /* SIOCSIWSTATS */
++      ks_wlan_get_iwstats,    /* SIOCGIWSTATS */
++      NULL,   /* SIOCSIWSPY */
++      NULL,   /* SIOCGIWSPY */
++      NULL,   /* SIOCSIWTHRSPY */
++      NULL,   /* SIOCGIWTHRSPY */
++      ks_wlan_set_wap,        /* SIOCSIWAP */
++      ks_wlan_get_wap,        /* SIOCGIWAP */
++//      NULL,                      /* SIOCSIWMLME */
++      ks_wlan_set_mlme,       /* SIOCSIWMLME */
++      ks_wlan_get_aplist,     /* SIOCGIWAPLIST */
++      ks_wlan_set_scan,       /* SIOCSIWSCAN */
++      ks_wlan_get_scan,       /* SIOCGIWSCAN */
++      ks_wlan_set_essid,      /* SIOCSIWESSID */
++      ks_wlan_get_essid,      /* SIOCGIWESSID */
++      ks_wlan_set_nick,       /* SIOCSIWNICKN */
++      ks_wlan_get_nick,       /* SIOCGIWNICKN */
++      NULL,   /* -- hole -- */
++      NULL,   /* -- hole -- */
++      ks_wlan_set_rate,       /* SIOCSIWRATE */
++      ks_wlan_get_rate,       /* SIOCGIWRATE */
++      ks_wlan_set_rts,        /* SIOCSIWRTS */
++      ks_wlan_get_rts,        /* SIOCGIWRTS */
++      ks_wlan_set_frag,       /* SIOCSIWFRAG */
++      ks_wlan_get_frag,       /* SIOCGIWFRAG */
+ #ifndef KSC_OPNOTSUPP
+-      (iw_handler) ks_wlan_set_txpow, /* SIOCSIWTXPOW */
+-      (iw_handler) ks_wlan_get_txpow, /* SIOCGIWTXPOW */
+-      (iw_handler) ks_wlan_set_retry, /* SIOCSIWRETRY */
+-      (iw_handler) ks_wlan_get_retry, /* SIOCGIWRETRY */
++      ks_wlan_set_txpow,      /* SIOCSIWTXPOW */
++      ks_wlan_get_txpow,      /* SIOCGIWTXPOW */
++      ks_wlan_set_retry,      /* SIOCSIWRETRY */
++      ks_wlan_get_retry,      /* SIOCGIWRETRY */
+ #else /* KSC_OPNOTSUPP */
+-      (iw_handler) NULL,      /* SIOCSIWTXPOW */
+-      (iw_handler) NULL,      /* SIOCGIWTXPOW */
+-      (iw_handler) NULL,      /* SIOCSIWRETRY */
+-      (iw_handler) NULL,      /* SIOCGIWRETRY */
++      NULL,   /* SIOCSIWTXPOW */
++      NULL,   /* SIOCGIWTXPOW */
++      NULL,   /* SIOCSIWRETRY */
++      NULL,   /* SIOCGIWRETRY */
+ #endif /* KSC_OPNOTSUPP */
+-      (iw_handler) ks_wlan_set_encode,        /* SIOCSIWENCODE */
+-      (iw_handler) ks_wlan_get_encode,        /* SIOCGIWENCODE */
+-      (iw_handler) ks_wlan_set_power, /* SIOCSIWPOWER */
+-      (iw_handler) ks_wlan_get_power, /* SIOCGIWPOWER */
+-      (iw_handler) NULL,      /* -- hole -- */
+-      (iw_handler) NULL,      /* -- hole -- */
+-//      (iw_handler) NULL,                      /* SIOCSIWGENIE */
+-      (iw_handler) ks_wlan_set_genie, /* SIOCSIWGENIE */
+-      (iw_handler) NULL,      /* SIOCGIWGENIE */
+-      (iw_handler) ks_wlan_set_auth_mode,     /* SIOCSIWAUTH */
+-      (iw_handler) ks_wlan_get_auth_mode,     /* SIOCGIWAUTH */
+-      (iw_handler) ks_wlan_set_encode_ext,    /* SIOCSIWENCODEEXT */
+-      (iw_handler) ks_wlan_get_encode_ext,    /* SIOCGIWENCODEEXT */
+-      (iw_handler) ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
+-      (iw_handler) NULL,      /* -- hole -- */
++      ks_wlan_set_encode,     /* SIOCSIWENCODE */
++      ks_wlan_get_encode,     /* SIOCGIWENCODE */
++      ks_wlan_set_power,      /* SIOCSIWPOWER */
++      ks_wlan_get_power,      /* SIOCGIWPOWER */
++      NULL,   /* -- hole -- */
++      NULL,   /* -- hole -- */
++//      NULL,                      /* SIOCSIWGENIE */
++      ks_wlan_set_genie,      /* SIOCSIWGENIE */
++      NULL,   /* SIOCGIWGENIE */
++      ks_wlan_set_auth_mode,  /* SIOCSIWAUTH */
++      ks_wlan_get_auth_mode,  /* SIOCGIWAUTH */
++      ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */
++      ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */
++      ks_wlan_set_pmksa,      /* SIOCSIWPMKSA */
++      NULL,   /* -- hole -- */
+ };
+ /* private_handler */
+ static const iw_handler ks_wlan_private_handler[] = {
+-      (iw_handler) NULL,      /*  0 */
+-      (iw_handler) NULL,      /*  1, used to be: KS_WLAN_GET_DRIVER_VERSION */
+-      (iw_handler) NULL,      /*  2 */
+-      (iw_handler) ks_wlan_get_firmware_version,      /*  3 KS_WLAN_GET_FIRM_VERSION */
++      NULL,   /*  0 */
++      NULL,   /*  1, used to be: KS_WLAN_GET_DRIVER_VERSION */
++      NULL,   /*  2 */
++      ks_wlan_get_firmware_version,   /*  3 KS_WLAN_GET_FIRM_VERSION */
+ #ifdef WPS
+-      (iw_handler) ks_wlan_set_wps_enable,    /*  4 KS_WLAN_SET_WPS_ENABLE  */
+-      (iw_handler) ks_wlan_get_wps_enable,    /*  5 KS_WLAN_GET_WPS_ENABLE  */
+-      (iw_handler) ks_wlan_set_wps_probe_req, /*  6 KS_WLAN_SET_WPS_PROBE_REQ */
++      ks_wlan_set_wps_enable, /*  4 KS_WLAN_SET_WPS_ENABLE  */
++      ks_wlan_get_wps_enable, /*  5 KS_WLAN_GET_WPS_ENABLE  */
++      ks_wlan_set_wps_probe_req,      /*  6 KS_WLAN_SET_WPS_PROBE_REQ */
+ #else
+-      (iw_handler) NULL,      /*  4 */
+-      (iw_handler) NULL,      /*  5 */
+-      (iw_handler) NULL,      /*  6 */
++      NULL,   /*  4 */
++      NULL,   /*  5 */
++      NULL,   /*  6 */
+ #endif /* WPS */
+-      (iw_handler) ks_wlan_get_eeprom_cksum,  /*  7 KS_WLAN_GET_CONNECT */
+-      (iw_handler) ks_wlan_set_preamble,      /*  8 KS_WLAN_SET_PREAMBLE */
+-      (iw_handler) ks_wlan_get_preamble,      /*  9 KS_WLAN_GET_PREAMBLE */
+-      (iw_handler) ks_wlan_set_powermgt,      /* 10 KS_WLAN_SET_POWER_SAVE */
+-      (iw_handler) ks_wlan_get_powermgt,      /* 11 KS_WLAN_GET_POWER_SAVE */
+-      (iw_handler) ks_wlan_set_scan_type,     /* 12 KS_WLAN_SET_SCAN_TYPE */
+-      (iw_handler) ks_wlan_get_scan_type,     /* 13 KS_WLAN_GET_SCAN_TYPE */
+-      (iw_handler) ks_wlan_set_rx_gain,       /* 14 KS_WLAN_SET_RX_GAIN */
+-      (iw_handler) ks_wlan_get_rx_gain,       /* 15 KS_WLAN_GET_RX_GAIN */
+-      (iw_handler) ks_wlan_hostt,     /* 16 KS_WLAN_HOSTT */
+-      (iw_handler) NULL,      /* 17 */
+-      (iw_handler) ks_wlan_set_beacon_lost,   /* 18 KS_WLAN_SET_BECAN_LOST */
+-      (iw_handler) ks_wlan_get_beacon_lost,   /* 19 KS_WLAN_GET_BECAN_LOST */
+-      (iw_handler) ks_wlan_set_tx_gain,       /* 20 KS_WLAN_SET_TX_GAIN */
+-      (iw_handler) ks_wlan_get_tx_gain,       /* 21 KS_WLAN_GET_TX_GAIN */
+-      (iw_handler) ks_wlan_set_phy_type,      /* 22 KS_WLAN_SET_PHY_TYPE */
+-      (iw_handler) ks_wlan_get_phy_type,      /* 23 KS_WLAN_GET_PHY_TYPE */
+-      (iw_handler) ks_wlan_set_cts_mode,      /* 24 KS_WLAN_SET_CTS_MODE */
+-      (iw_handler) ks_wlan_get_cts_mode,      /* 25 KS_WLAN_GET_CTS_MODE */
+-      (iw_handler) NULL,      /* 26 */
+-      (iw_handler) NULL,      /* 27 */
+-      (iw_handler) ks_wlan_set_sleep_mode,    /* 28 KS_WLAN_SET_SLEEP_MODE */
+-      (iw_handler) ks_wlan_get_sleep_mode,    /* 29 KS_WLAN_GET_SLEEP_MODE */
+-      (iw_handler) NULL,      /* 30 */
+-      (iw_handler) NULL,      /* 31 */
++      ks_wlan_get_eeprom_cksum,       /*  7 KS_WLAN_GET_CONNECT */
++      ks_wlan_set_preamble,   /*  8 KS_WLAN_SET_PREAMBLE */
++      ks_wlan_get_preamble,   /*  9 KS_WLAN_GET_PREAMBLE */
++      ks_wlan_set_powermgt,   /* 10 KS_WLAN_SET_POWER_SAVE */
++      ks_wlan_get_powermgt,   /* 11 KS_WLAN_GET_POWER_SAVE */
++      ks_wlan_set_scan_type,  /* 12 KS_WLAN_SET_SCAN_TYPE */
++      ks_wlan_get_scan_type,  /* 13 KS_WLAN_GET_SCAN_TYPE */
++      ks_wlan_set_rx_gain,    /* 14 KS_WLAN_SET_RX_GAIN */
++      ks_wlan_get_rx_gain,    /* 15 KS_WLAN_GET_RX_GAIN */
++      ks_wlan_hostt,  /* 16 KS_WLAN_HOSTT */
++      NULL,   /* 17 */
++      ks_wlan_set_beacon_lost,        /* 18 KS_WLAN_SET_BECAN_LOST */
++      ks_wlan_get_beacon_lost,        /* 19 KS_WLAN_GET_BECAN_LOST */
++      ks_wlan_set_tx_gain,    /* 20 KS_WLAN_SET_TX_GAIN */
++      ks_wlan_get_tx_gain,    /* 21 KS_WLAN_GET_TX_GAIN */
++      ks_wlan_set_phy_type,   /* 22 KS_WLAN_SET_PHY_TYPE */
++      ks_wlan_get_phy_type,   /* 23 KS_WLAN_GET_PHY_TYPE */
++      ks_wlan_set_cts_mode,   /* 24 KS_WLAN_SET_CTS_MODE */
++      ks_wlan_get_cts_mode,   /* 25 KS_WLAN_GET_CTS_MODE */
++      NULL,   /* 26 */
++      NULL,   /* 27 */
++      ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
++      ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
++      NULL,   /* 30 */
++      NULL,   /* 31 */
+ };
+ static const struct iw_handler_def ks_wlan_handler_def = {
+@@ -3287,8 +3349,8 @@ static const struct iw_handler_def ks_wlan_handler_def = {
+       .num_private = sizeof(ks_wlan_private_handler) / sizeof(iw_handler),
+       .num_private_args =
+           sizeof(ks_wlan_private_args) / sizeof(struct iw_priv_args),
+-      .standard = (iw_handler *) ks_wlan_handler,
+-      .private = (iw_handler *) ks_wlan_private_handler,
++      .standard = ks_wlan_handler,
++      .private = ks_wlan_private_handler,
+       .private_args = (struct iw_priv_args *)ks_wlan_private_args,
+       .get_wireless_stats = ks_get_wireless_stats,
+ };
+@@ -3359,7 +3421,7 @@ void ks_wlan_tx_timeout(struct net_device *dev)
+ }
+ static
+-int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
++netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ks_wlan_private *priv = netdev_priv(dev);
+       int rc = 0;
+diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+index a56632b..5d236d8 100644
+--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
++++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+@@ -305,10 +305,8 @@ struct ksock_conn {
+       struct ksock_route *ksnc_route;       /* owning route */
+       struct list_head   ksnc_list;         /* stash on peer's conn list */
+       struct socket      *ksnc_sock;        /* actual socket */
+-      void               *ksnc_saved_data_ready;  /* socket's original
+-                                                   * data_ready() callback */
+-      void               *ksnc_saved_write_space; /* socket's original
+-                                                   * write_space() callback */
++      void               (*ksnc_saved_data_ready)(struct sock *sk); /* socket's original data_ready() callback */
++      void               (*ksnc_saved_write_space)(struct sock *sk); /* socket's original write_space() callback */
+       atomic_t           ksnc_conn_refcount;/* conn refcount */
+       atomic_t           ksnc_sock_refcount;/* sock refcount */
+       struct ksock_sched *ksnc_scheduler;     /* who schedules this connection
+diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
+index 13d0454..f18459d 100644
+--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
++++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
+@@ -324,7 +324,7 @@ brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
+               CERROR("BRW RPC to %s failed with %d\n",
+                      libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
+               if (!tsi->tsi_stopping) /* rpc could have been aborted */
+-                      atomic_inc(&sn->sn_brw_errors);
++                      atomic_inc_unchecked(&sn->sn_brw_errors);
+               return;
+       }
+@@ -338,7 +338,7 @@ brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
+              libcfs_id2str(rpc->crpc_dest), reply->brw_status);
+       if (reply->brw_status) {
+-              atomic_inc(&sn->sn_brw_errors);
++              atomic_inc_unchecked(&sn->sn_brw_errors);
+               rpc->crpc_status = -(int)reply->brw_status;
+               return;
+       }
+@@ -349,7 +349,7 @@ brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
+       if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) {
+               CERROR("Bulk data from %s is corrupted!\n",
+                      libcfs_id2str(rpc->crpc_dest));
+-              atomic_inc(&sn->sn_brw_errors);
++              atomic_inc_unchecked(&sn->sn_brw_errors);
+               rpc->crpc_status = -EBADMSG;
+       }
+ }
+@@ -484,14 +484,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
+       return 0;
+ }
+-struct sfw_test_client_ops brw_test_client;
+-
+-void brw_init_test_client(void)
+-{
+-      brw_test_client.tso_init = brw_client_init;
+-      brw_test_client.tso_fini = brw_client_fini;
+-      brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
+-      brw_test_client.tso_done_rpc = brw_client_done_rpc;
++struct sfw_test_client_ops brw_test_client = {
++      .tso_init = brw_client_init,
++      .tso_fini = brw_client_fini,
++      .tso_prep_rpc = brw_client_prep_rpc,
++      .tso_done_rpc = brw_client_done_rpc,
+ };
+ struct srpc_service brw_test_service;
+diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
+index c2f121f..c315572 100644
+--- a/drivers/staging/lustre/lnet/selftest/framework.c
++++ b/drivers/staging/lustre/lnet/selftest/framework.c
+@@ -262,8 +262,8 @@ sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
+       INIT_LIST_HEAD(&sn->sn_list);
+       INIT_LIST_HEAD(&sn->sn_batches);
+       atomic_set(&sn->sn_refcount, 1);        /* +1 for caller */
+-      atomic_set(&sn->sn_brw_errors, 0);
+-      atomic_set(&sn->sn_ping_errors, 0);
++      atomic_set_unchecked(&sn->sn_brw_errors, 0);
++      atomic_set_unchecked(&sn->sn_ping_errors, 0);
+       strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
+       sn->sn_timer_active = 0;
+@@ -383,8 +383,8 @@ sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
+        * with 32 bits to send, this is ~49 days
+        */
+       cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started);
+-      cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
+-      cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
++      cnt->brw_errors = atomic_read_unchecked(&sn->sn_brw_errors);
++      cnt->ping_errors = atomic_read_unchecked(&sn->sn_ping_errors);
+       cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
+       cnt->active_batches = 0;
+@@ -1655,12 +1655,10 @@ sfw_startup(void)
+       INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
+       INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
+-      brw_init_test_client();
+       brw_init_test_service();
+       rc = sfw_register_test(&brw_test_service, &brw_test_client);
+       LASSERT(!rc);
+-      ping_init_test_client();
+       ping_init_test_service();
+       rc = sfw_register_test(&ping_test_service, &ping_test_client);
+       LASSERT(!rc);
+diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
+index 9331ca4..23511db 100644
+--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
++++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
+@@ -74,7 +74,7 @@ ping_client_fini(struct sfw_test_instance *tsi)
+       LASSERT(sn);
+       LASSERT(tsi->tsi_is_client);
+-      errors = atomic_read(&sn->sn_ping_errors);
++      errors = atomic_read_unchecked(&sn->sn_ping_errors);
+       if (errors)
+               CWARN("%d pings have failed.\n", errors);
+       else
+@@ -126,7 +126,7 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
+       if (rpc->crpc_status) {
+               if (!tsi->tsi_stopping) /* rpc could have been aborted */
+-                      atomic_inc(&sn->sn_ping_errors);
++                      atomic_inc_unchecked(&sn->sn_ping_errors);
+               CERROR("Unable to ping %s (%d): %d\n",
+                      libcfs_id2str(rpc->crpc_dest),
+                      reqst->pnr_seq, rpc->crpc_status);
+@@ -141,7 +141,7 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
+       if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
+               rpc->crpc_status = -EBADMSG;
+-              atomic_inc(&sn->sn_ping_errors);
++              atomic_inc_unchecked(&sn->sn_ping_errors);
+               CERROR("Bad magic %u from %s, %u expected.\n",
+                      reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
+                      LST_PING_TEST_MAGIC);
+@@ -150,7 +150,7 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
+       if (reply->pnr_seq != reqst->pnr_seq) {
+               rpc->crpc_status = -EBADMSG;
+-              atomic_inc(&sn->sn_ping_errors);
++              atomic_inc_unchecked(&sn->sn_ping_errors);
+               CERROR("Bad seq %u from %s, %u expected.\n",
+                      reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
+                      reqst->pnr_seq);
+@@ -206,15 +206,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
+       return 0;
+ }
+-struct sfw_test_client_ops ping_test_client;
+-
+-void ping_init_test_client(void)
+-{
+-      ping_test_client.tso_init = ping_client_init;
+-      ping_test_client.tso_fini = ping_client_fini;
+-      ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
+-      ping_test_client.tso_done_rpc = ping_client_done_rpc;
+-}
++struct sfw_test_client_ops ping_test_client = {
++      .tso_init = ping_client_init,
++      .tso_fini = ping_client_fini,
++      .tso_prep_rpc = ping_client_prep_rpc,
++      .tso_done_rpc = ping_client_done_rpc,
++};
+ struct srpc_service ping_test_service;
+diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
+index d033ac0..528a102 100644
+--- a/drivers/staging/lustre/lnet/selftest/selftest.h
++++ b/drivers/staging/lustre/lnet/selftest/selftest.h
+@@ -328,8 +328,8 @@ struct sfw_session {
+       struct list_head sn_batches; /* list of batches */
+       char             sn_name[LST_NAME_SIZE];
+       atomic_t         sn_refcount;
+-      atomic_t         sn_brw_errors;
+-      atomic_t         sn_ping_errors;
++      atomic_unchecked_t sn_brw_errors;
++      atomic_unchecked_t sn_ping_errors;
+       unsigned long    sn_started;
+ };
+@@ -607,13 +607,11 @@ srpc_wait_service_shutdown(struct srpc_service *sv)
+ }
+ extern struct sfw_test_client_ops brw_test_client;
+-void brw_init_test_client(void);
+ extern struct srpc_service brw_test_service;
+ void brw_init_test_service(void);
+ extern struct sfw_test_client_ops ping_test_client;
+-void ping_init_test_client(void);
+ extern struct srpc_service ping_test_service;
+ void ping_init_test_service(void);
+diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+index 051864c..72aca9b 100644
+--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
++++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+@@ -784,7 +784,7 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid)
+       return fid_seq(fid);
+ }
+-void lustre_swab_ost_id(struct ost_id *oid);
++void lustre_swab_ost_id(void *oid);
+ /**
+  * Get inode generation from a igif.
+@@ -851,8 +851,8 @@ static inline int fid_is_zero(const struct lu_fid *fid)
+       return fid_seq(fid) == 0 && fid_oid(fid) == 0;
+ }
+-void lustre_swab_lu_fid(struct lu_fid *fid);
+-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
++void lustre_swab_lu_fid(void *fid);
++void lustre_swab_lu_seq_range(void *range);
+ static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
+ {
+@@ -1157,7 +1157,7 @@ struct ptlrpc_body_v2 {
+       __u64 pb_padding[4];
+ };
+-void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
++void lustre_swab_ptlrpc_body(void *pb);
+ /* message body offset for lustre_msg_v2 */
+ /* ptlrpc body offset in all request/reply messages */
+@@ -1398,7 +1398,7 @@ struct obd_connect_data {
+  * reserve the flag for future use.
+  */
+-void lustre_swab_connect(struct obd_connect_data *ocd);
++void lustre_swab_connect(void *ocd);
+ /*
+  * Supported checksum algorithms. Up to 32 checksum types are supported.
+@@ -1752,10 +1752,10 @@ struct hsm_state_set {
+       __u64   hss_clearmask;
+ };
+-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+-void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
++void lustre_swab_hsm_user_state(void *hus);
++void lustre_swab_hsm_state_set(void *hss);
+-void lustre_swab_obd_statfs(struct obd_statfs *os);
++void lustre_swab_obd_statfs(void *os);
+ /* ost_body.data values for OST_BRW */
+@@ -1802,7 +1802,7 @@ struct obd_ioobj {
+ #define ioobj_max_brw_set(ioo, num)                                   \
+ do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
+-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
++void lustre_swab_obd_ioobj(void *ioo);
+ /* multiple of 8 bytes => can array */
+ struct niobuf_remote {
+@@ -1811,7 +1811,7 @@ struct niobuf_remote {
+       __u32 flags;
+ };
+-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
++void lustre_swab_niobuf_remote(void *nbr);
+ /* lock value block communicated between the filter and llite */
+@@ -1876,7 +1876,7 @@ struct obd_quotactl {
+       struct obd_dqblk        qc_dqblk;
+ };
+-void lustre_swab_obd_quotactl(struct obd_quotactl *q);
++void lustre_swab_obd_quotactl(void *q);
+ #define Q_QUOTACHECK  0x800100 /* deprecated as of 2.4 */
+ #define Q_INITQUOTA   0x800101 /* deprecated as of 2.4  */
+@@ -1988,7 +1988,7 @@ enum mdt_reint_cmd {
+       REINT_MAX
+ };
+-void lustre_swab_generic_32s(__u32 *val);
++void lustre_swab_generic_32s(void *val);
+ /* the disposition of the intent outlines what was executed */
+ #define DISP_IT_EXECD 0x00000001
+@@ -2147,7 +2147,7 @@ struct mdt_body {
+       __u64     padding_10;
+ }; /* 216 */
+-void lustre_swab_mdt_body(struct mdt_body *b);
++void lustre_swab_mdt_body(void *b);
+ struct mdt_ioepoch {
+       struct lustre_handle handle;
+@@ -2156,7 +2156,7 @@ struct mdt_ioepoch {
+       __u32  padding;
+ };
+-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
++void lustre_swab_mdt_ioepoch(void *b);
+ /* permissions for md_perm.mp_perm */
+ enum {
+@@ -2465,7 +2465,7 @@ struct mdt_rec_reint {
+       __u32      rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
+ };
+-void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
++void lustre_swab_mdt_rec_reint(void *rr);
+ /* lmv structures */
+ struct lmv_desc {
+@@ -2664,13 +2664,13 @@ union ldlm_gl_desc {
+       struct ldlm_gl_lquota_desc      lquota_desc;
+ };
+-void lustre_swab_gl_desc(union ldlm_gl_desc *);
++void lustre_swab_gl_desc(void *);
+ struct ldlm_intent {
+       __u64 opc;
+ };
+-void lustre_swab_ldlm_intent(struct ldlm_intent *i);
++void lustre_swab_ldlm_intent(void *i);
+ struct ldlm_resource_desc {
+       enum ldlm_type lr_type;
+@@ -2695,7 +2695,7 @@ struct ldlm_request {
+       struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
+ };
+-void lustre_swab_ldlm_request(struct ldlm_request *rq);
++void lustre_swab_ldlm_request(void *rq);
+ /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
+  * Otherwise, 2 are available.
+@@ -2718,7 +2718,7 @@ struct ldlm_reply {
+       __u64  lock_policy_res2;
+ };
+-void lustre_swab_ldlm_reply(struct ldlm_reply *r);
++void lustre_swab_ldlm_reply(void *r);
+ #define ldlm_flags_to_wire(flags)    ((__u32)(flags))
+ #define ldlm_flags_from_wire(flags)  ((__u64)(flags))
+@@ -2763,7 +2763,7 @@ struct mgs_target_info {
+       char         mti_params[MTI_PARAM_MAXLEN];
+ };
+-void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
++void lustre_swab_mgs_target_info(void *oinfo);
+ struct mgs_nidtbl_entry {
+       __u64      mne_version;    /* table version of this entry */
+@@ -2790,14 +2790,14 @@ struct mgs_config_body {
+       __u32    mcb_units;     /* # of units for bulk transfer */
+ };
+-void lustre_swab_mgs_config_body(struct mgs_config_body *body);
++void lustre_swab_mgs_config_body(void *body);
+ struct mgs_config_res {
+       __u64    mcr_offset;    /* index of last config log */
+       __u64    mcr_size;      /* size of the log */
+ };
+-void lustre_swab_mgs_config_res(struct mgs_config_res *body);
++void lustre_swab_mgs_config_res(void *body);
+ /* Config marker flags (in config log) */
+ #define CM_START       0x01
+@@ -3224,9 +3224,9 @@ struct ll_fiemap_info_key {
+       struct  ll_user_fiemap fiemap;
+ };
+-void lustre_swab_ost_body(struct ost_body *b);
+-void lustre_swab_ost_last_id(__u64 *id);
+-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
++void lustre_swab_ost_body(void *b);
++void lustre_swab_ost_last_id(void *id);
++void lustre_swab_fiemap(void *fiemap);
+ void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+ void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+@@ -3235,19 +3235,19 @@ void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+ void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+ /* llog_swab.c */
+-void lustre_swab_llogd_body(struct llogd_body *d);
+-void lustre_swab_llog_hdr(struct llog_log_hdr *h);
+-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
++void lustre_swab_llogd_body(void *d);
++void lustre_swab_llog_hdr(void *h);
++void lustre_swab_llogd_conn_body(void *d);
+ void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+ struct lustre_cfg;
+ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+ /* Functions for dumping PTLRPC fields */
+-void dump_rniobuf(struct niobuf_remote *rnb);
+-void dump_ioo(struct obd_ioobj *nb);
+-void dump_ost_body(struct ost_body *ob);
+-void dump_rcs(__u32 *rc);
++void dump_rniobuf(void *rnb);
++void dump_ioo(void *nb);
++void dump_ost_body(void *ob);
++void dump_rcs(void *rc);
+ /* security opcodes */
+ enum sec_cmd {
+@@ -3280,7 +3280,7 @@ struct lustre_capa {
+       __u8        lc_hmac[CAPA_HMAC_MAX_LEN];   /** HMAC */
+ } __packed;
+-void lustre_swab_lustre_capa(struct lustre_capa *c);
++void lustre_swab_lustre_capa(void *c);
+ /** lustre_capa::lc_opc */
+ enum {
+@@ -3364,7 +3364,7 @@ struct layout_intent {
+       __u64 li_end;
+ };
+-void lustre_swab_layout_intent(struct layout_intent *li);
++void lustre_swab_layout_intent(void *li);
+ /**
+  * On the wire version of hsm_progress structure.
+@@ -3384,12 +3384,10 @@ struct hsm_progress_kernel {
+       __u64                   hpk_padding2;
+ } __packed;
+-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+-void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+-void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+-void lustre_swab_hsm_request(struct hsm_request *hr);
++void lustre_swab_hsm_current_action(void *action);
++void lustre_swab_hsm_progress_kernel(void *hpk);
++void lustre_swab_hsm_user_item(void *hui);
++void lustre_swab_hsm_request(void *hr);
+ /** layout swap request structure
+  * fid1 and fid2 are in mdt_body
+@@ -3398,7 +3396,7 @@ struct mdc_swap_layouts {
+       __u64      msl_flags;
+ } __packed;
+-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
++void lustre_swab_swap_layouts(void *msl);
+ struct close_data {
+       struct lustre_handle    cd_handle;
+@@ -3407,7 +3405,7 @@ struct close_data {
+       __u64                   cd_reserved[8];
+ };
+-void lustre_swab_close_data(struct close_data *data);
++void lustre_swab_close_data(void *data);
+ #endif
+ /** @} lustreidl */
+diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
+index 60051a5..76ac7a7 100644
+--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
++++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
+@@ -964,9 +964,9 @@ struct ldlm_ast_work {
+ struct ldlm_enqueue_info {
+       __u32 ei_type;   /** Type of the lock being enqueued. */
+       __u32 ei_mode;   /** Mode of the lock being enqueued. */
+-      void *ei_cb_bl;  /** blocking lock callback */
+-      void *ei_cb_cp;  /** lock completion callback */
+-      void *ei_cb_gl;  /** lock glimpse callback */
++      ldlm_blocking_callback ei_cb_bl;  /** blocking lock callback */
++      ldlm_completion_callback ei_cb_cp;  /** lock completion callback */
++      ldlm_glimpse_callback ei_cb_gl;  /** lock glimpse callback */
+       void *ei_cbdata; /** Data to be passed into callbacks. */
+ };
+@@ -1060,7 +1060,7 @@ struct ldlm_callback_suite {
+       ldlm_completion_callback lcs_completion;
+       ldlm_blocking_callback   lcs_blocking;
+       ldlm_glimpse_callback    lcs_glimpse;
+-};
++} __no_const;
+ /* ldlm_lockd.c */
+ int ldlm_get_ref(void);
+diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
+index d5debd6..ea5c42e 100644
+--- a/drivers/staging/lustre/lustre/include/lustre_net.h
++++ b/drivers/staging/lustre/lustre/include/lustre_net.h
+@@ -2641,7 +2641,7 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
+ void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
+ int lustre_msg_buflen(struct lustre_msg *m, int n);
+ int lustre_msg_bufcount(struct lustre_msg *m);
+-char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
++void *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
+ __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
+ void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
+ __u32 lustre_msg_get_flags(struct lustre_msg *msg);
+diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
+index a1bc2c4..6cd1797 100644
+--- a/drivers/staging/lustre/lustre/include/obd.h
++++ b/drivers/staging/lustre/lustre/include/obd.h
+@@ -1133,7 +1133,7 @@ struct md_ops {
+        * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
+        * wrapper function in include/linux/obd_class.h.
+        */
+-};
++} __no_const;
+ struct lsm_operations {
+       void (*lsm_free)(struct lov_stripe_md *);
+diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+index d6b61bc..3e4f655 100644
+--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
++++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+@@ -143,7 +143,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
+       int added = (mode == LCK_NL);
+       int overlaps = 0;
+       int splitted = 0;
+-      const struct ldlm_callback_suite null_cbs = { NULL };
++      const struct ldlm_callback_suite null_cbs = { };
+       CDEBUG(D_DLMTRACE,
+              "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
+diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+index af487f9..533b121 100644
+--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
++++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+@@ -1853,8 +1853,9 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
+ static int replay_lock_interpret(const struct lu_env *env,
+                                struct ptlrpc_request *req,
+-                               struct ldlm_async_args *aa, int rc)
++                               void *_aa, int rc)
+ {
++      struct ldlm_async_args *aa = _aa;
+       struct ldlm_lock     *lock;
+       struct ldlm_reply    *reply;
+       struct obd_export    *exp;
+@@ -1981,7 +1982,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
+       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+       aa = ptlrpc_req_async_args(req);
+       aa->lock_handle = body->lock_handle[0];
+-      req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
++      req->rq_interpret_reply = replay_lock_interpret;
+       ptlrpcd_add_req(req);
+       return 0;
+diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
+index 5b38177..929e628 100644
+--- a/drivers/staging/lustre/lustre/llite/dir.c
++++ b/drivers/staging/lustre/lustre/llite/dir.c
+@@ -136,7 +136,7 @@
+  */
+ /* returns the page unlocked, but with a reference */
+-static int ll_dir_filler(void *_hash, struct page *page0)
++static int ll_dir_filler(struct file *_hash, struct page *page0)
+ {
+       struct inode *inode = page0->mapping->host;
+       int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
+diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
+index 4d6d589..f0268e9 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
++++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
+@@ -478,13 +478,13 @@ struct ll_sb_info {
+       /* metadata stat-ahead */
+       unsigned int          ll_sa_max;     /* max statahead RPCs */
+-      atomic_t                  ll_sa_total;   /* statahead thread started
++      atomic_unchecked_t        ll_sa_total;   /* statahead thread started
+                                                 * count
+                                                 */
+-      atomic_t                  ll_sa_wrong;   /* statahead thread stopped for
++      atomic_unchecked_t        ll_sa_wrong;   /* statahead thread stopped for
+                                                 * low hit ratio
+                                                 */
+-      atomic_t                  ll_agl_total;  /* AGL thread started count */
++      atomic_unchecked_t        ll_agl_total;  /* AGL thread started count */
+       dev_t                     ll_sdev_orig; /* save s_dev before assign for
+                                                * clustered nfs
+diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
+index 546063e..5955697 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
++++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
+@@ -113,9 +113,9 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
+       /* metadata statahead is enabled by default */
+       sbi->ll_sa_max = LL_SA_RPC_DEF;
+-      atomic_set(&sbi->ll_sa_total, 0);
+-      atomic_set(&sbi->ll_sa_wrong, 0);
+-      atomic_set(&sbi->ll_agl_total, 0);
++      atomic_set_unchecked(&sbi->ll_sa_total, 0);
++      atomic_set_unchecked(&sbi->ll_sa_wrong, 0);
++      atomic_set_unchecked(&sbi->ll_agl_total, 0);
+       sbi->ll_flags |= LL_SBI_AGL_ENABLED;
+       sbi->ll_sb = sb;
+diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
+index e86bf3c..c2a3f39 100644
+--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
++++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
+@@ -680,9 +680,9 @@ static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
+                  "statahead total: %u\n"
+                  "statahead wrong: %u\n"
+                  "agl total: %u\n",
+-                 atomic_read(&sbi->ll_sa_total),
+-                 atomic_read(&sbi->ll_sa_wrong),
+-                 atomic_read(&sbi->ll_agl_total));
++                 atomic_read_unchecked(&sbi->ll_sa_total),
++                 atomic_read_unchecked(&sbi->ll_sa_wrong),
++                 atomic_read_unchecked(&sbi->ll_agl_total));
+       return 0;
+ }
+diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
+index c1cb6b1..62f954b 100644
+--- a/drivers/staging/lustre/lustre/llite/statahead.c
++++ b/drivers/staging/lustre/lustre/llite/statahead.c
+@@ -945,7 +945,7 @@ static int ll_agl_thread(void *arg)
+       CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
+              sai, parent);
+-      atomic_inc(&sbi->ll_agl_total);
++      atomic_inc_unchecked(&sbi->ll_agl_total);
+       spin_lock(&plli->lli_agl_lock);
+       sai->sai_agl_valid = 1;
+       if (thread_is_init(thread))
+@@ -1049,7 +1049,7 @@ static int ll_statahead_thread(void *arg)
+       if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
+               ll_start_agl(parent, sai);
+-      atomic_inc(&sbi->ll_sa_total);
++      atomic_inc_unchecked(&sbi->ll_sa_total);
+       spin_lock(&plli->lli_sa_lock);
+       if (thread_is_init(thread))
+               /* If someone else has changed the thread state
+@@ -1472,7 +1472,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+               sai->sai_miss++;
+               sai->sai_consecutive_miss++;
+               if (sa_low_hit(sai) && thread_is_running(thread)) {
+-                      atomic_inc(&sbi->ll_sa_wrong);
++                      atomic_inc_unchecked(&sbi->ll_sa_wrong);
+                       CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
+                              PFID(&lli->lli_fid), sai->sai_hit,
+                              sai->sai_miss, sai->sai_sent,
+diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
+index 12bd511..45e526d 100644
+--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
++++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
+@@ -107,9 +107,9 @@ struct lov_request_set {
+        */
+       struct obd_device               *set_obd;
+       int                             set_count;
+-      atomic_t                        set_completes;
+-      atomic_t                        set_success;
+-      atomic_t                        set_finish_checked;
++      atomic_unchecked_t              set_completes;
++      atomic_unchecked_t              set_success;
++      atomic_unchecked_t              set_finish_checked;
+       struct llog_cookie              *set_cookies;
+       int                             set_cookie_sent;
+       struct list_head                        set_list;
+diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
+index 84032a5..8aa6085 100644
+--- a/drivers/staging/lustre/lustre/lov/lov_io.c
++++ b/drivers/staging/lustre/lustre/lov/lov_io.c
+@@ -810,12 +810,32 @@ static void lov_empty_io_fini(const struct lu_env *env,
+ }
+ static void lov_empty_impossible(const struct lu_env *env,
+-                               struct cl_io_slice *ios)
++                               const struct cl_io_slice *ios)
+ {
+       LBUG();
+ }
+-#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
++static int lov_empty_impossible2(const struct lu_env *env,
++                               const struct cl_io_slice *ios)
++{
++      LBUG();
++}
++
++static int lov_empty_impossible3(const struct lu_env *env,
++                               const struct cl_io_slice *slice,
++                               enum cl_req_type crt,
++                               struct cl_2queue *queue)
++{
++      LBUG();
++}
++
++static int lov_empty_impossible4(const struct lu_env *env,
++                               const struct cl_io_slice *slice,
++                               struct cl_page_list *queue, int from, int to,
++                               cl_commit_cbt cb)
++{
++      LBUG();
++}
+ /**
+  * An io operation vector for files without stripes.
+@@ -825,32 +845,32 @@ static const struct cl_io_operations lov_empty_io_ops = {
+               [CIT_READ] = {
+                       .cio_fini       = lov_empty_io_fini,
+ #if 0
+-                      .cio_iter_init  = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_lock       = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_start      = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_end        = LOV_EMPTY_IMPOSSIBLE
++                      .cio_iter_init  = lov_empty_impossible2,
++                      .cio_lock       = lov_empty_impossible2,
++                      .cio_start      = lov_empty_impossible2,
++                      .cio_end        = lov_empty_impossible
+ #endif
+               },
+               [CIT_WRITE] = {
+                       .cio_fini      = lov_empty_io_fini,
+-                      .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_lock      = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_start     = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_end       = LOV_EMPTY_IMPOSSIBLE
++                      .cio_iter_init = lov_empty_impossible2,
++                      .cio_lock      = lov_empty_impossible2,
++                      .cio_start     = lov_empty_impossible2,
++                      .cio_end       = lov_empty_impossible
+               },
+               [CIT_SETATTR] = {
+                       .cio_fini      = lov_empty_io_fini,
+-                      .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_lock      = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_start     = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_end       = LOV_EMPTY_IMPOSSIBLE
++                      .cio_iter_init = lov_empty_impossible2,
++                      .cio_lock      = lov_empty_impossible2,
++                      .cio_start     = lov_empty_impossible2,
++                      .cio_end       = lov_empty_impossible
+               },
+               [CIT_FAULT] = {
+                       .cio_fini      = lov_empty_io_fini,
+-                      .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_lock      = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_start     = LOV_EMPTY_IMPOSSIBLE,
+-                      .cio_end       = LOV_EMPTY_IMPOSSIBLE
++                      .cio_iter_init = lov_empty_impossible2,
++                      .cio_lock      = lov_empty_impossible2,
++                      .cio_start     = lov_empty_impossible2,
++                      .cio_end       = lov_empty_impossible
+               },
+               [CIT_FSYNC] = {
+                       .cio_fini   = lov_empty_io_fini
+@@ -859,8 +879,8 @@ static const struct cl_io_operations lov_empty_io_ops = {
+                       .cio_fini   = lov_empty_io_fini
+               }
+       },
+-      .cio_submit                    = LOV_EMPTY_IMPOSSIBLE,
+-      .cio_commit_async              = LOV_EMPTY_IMPOSSIBLE
++      .cio_submit                    = lov_empty_impossible3,
++      .cio_commit_async              = lov_empty_impossible4
+ };
+ int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
+diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
+index 9b92d55..0d56df1 100644
+--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
++++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
+@@ -1126,7 +1126,7 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
+       /* don't do attribute merge if this async op failed */
+       if (rc)
+-              atomic_set(&lovset->set_completes, 0);
++              atomic_set_unchecked(&lovset->set_completes, 0);
+       err = lov_fini_getattr_set(lovset);
+       return rc ? rc : err;
+ }
+@@ -1181,7 +1181,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
+       }
+ out:
+       if (rc)
+-              atomic_set(&lovset->set_completes, 0);
++              atomic_set_unchecked(&lovset->set_completes, 0);
+       err = lov_fini_getattr_set(lovset);
+       return rc ? rc : err;
+ }
+@@ -1193,7 +1193,7 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
+       int err;
+       if (rc)
+-              atomic_set(&lovset->set_completes, 0);
++              atomic_set_unchecked(&lovset->set_completes, 0);
+       err = lov_fini_setattr_set(lovset);
+       return rc ? rc : err;
+ }
+@@ -1255,7 +1255,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
+               int err;
+               if (rc)
+-                      atomic_set(&set->set_completes, 0);
++                      atomic_set_unchecked(&set->set_completes, 0);
+               err = lov_fini_setattr_set(set);
+               return rc ? rc : err;
+       }
+@@ -1313,7 +1313,7 @@ int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
+       int err;
+       if (rc)
+-              atomic_set(&lovset->set_completes, 0);
++              atomic_set_unchecked(&lovset->set_completes, 0);
+       err = lov_fini_statfs_set(lovset);
+       return rc ? rc : err;
+@@ -1346,7 +1346,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
+               int err;
+               if (rc)
+-                      atomic_set(&set->set_completes, 0);
++                      atomic_set_unchecked(&set->set_completes, 0);
+               err = lov_fini_statfs_set(set);
+               return rc ? rc : err;
+       }
+diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
+index 4099b51..bb809b2 100644
+--- a/drivers/staging/lustre/lustre/lov/lov_request.c
++++ b/drivers/staging/lustre/lustre/lov/lov_request.c
+@@ -41,9 +41,9 @@
+ static void lov_init_set(struct lov_request_set *set)
+ {
+       set->set_count = 0;
+-      atomic_set(&set->set_completes, 0);
+-      atomic_set(&set->set_success, 0);
+-      atomic_set(&set->set_finish_checked, 0);
++      atomic_set_unchecked(&set->set_completes, 0);
++      atomic_set_unchecked(&set->set_success, 0);
++      atomic_set_unchecked(&set->set_finish_checked, 0);
+       set->set_cookies = NULL;
+       INIT_LIST_HEAD(&set->set_list);
+       atomic_set(&set->set_refcount, 1);
+@@ -71,14 +71,14 @@ void lov_finish_set(struct lov_request_set *set)
+ static int lov_set_finished(struct lov_request_set *set, int idempotent)
+ {
+-      int completes = atomic_read(&set->set_completes);
++      int completes = atomic_read_unchecked(&set->set_completes);
+       CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count);
+       if (completes == set->set_count) {
+               if (idempotent)
+                       return 1;
+-              if (atomic_inc_return(&set->set_finish_checked) == 1)
++              if (atomic_inc_return_unchecked(&set->set_finish_checked) == 1)
+                       return 1;
+       }
+       return 0;
+@@ -90,9 +90,9 @@ static void lov_update_set(struct lov_request_set *set,
+       req->rq_complete = 1;
+       req->rq_rc = rc;
+-      atomic_inc(&set->set_completes);
++      atomic_inc_unchecked(&set->set_completes);
+       if (rc == 0)
+-              atomic_inc(&set->set_success);
++              atomic_inc_unchecked(&set->set_success);
+       wake_up(&set->set_waitq);
+ }
+@@ -192,7 +192,7 @@ static int common_attr_done(struct lov_request_set *set)
+       if (!set->set_oi->oi_oa)
+               return 0;
+-      if (!atomic_read(&set->set_success))
++      if (!atomic_read_unchecked(&set->set_success))
+               return -EIO;
+       tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+@@ -239,7 +239,7 @@ int lov_fini_getattr_set(struct lov_request_set *set)
+       if (!set)
+               return 0;
+       LASSERT(set->set_exp);
+-      if (atomic_read(&set->set_completes))
++      if (atomic_read_unchecked(&set->set_completes))
+               rc = common_attr_done(set);
+       lov_put_reqset(set);
+@@ -330,7 +330,7 @@ int lov_fini_destroy_set(struct lov_request_set *set)
+       if (!set)
+               return 0;
+       LASSERT(set->set_exp);
+-      if (atomic_read(&set->set_completes)) {
++      if (atomic_read_unchecked(&set->set_completes)) {
+               /* FIXME update qos data here */
+       }
+@@ -410,7 +410,7 @@ int lov_fini_setattr_set(struct lov_request_set *set)
+       if (!set)
+               return 0;
+       LASSERT(set->set_exp);
+-      if (atomic_read(&set->set_completes)) {
++      if (atomic_read_unchecked(&set->set_completes)) {
+               rc = common_attr_done(set);
+               /* FIXME update qos data here */
+       }
+@@ -571,9 +571,9 @@ int lov_fini_statfs_set(struct lov_request_set *set)
+       if (!set)
+               return 0;
+-      if (atomic_read(&set->set_completes)) {
++      if (atomic_read_unchecked(&set->set_completes)) {
+               rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
+-                                   atomic_read(&set->set_success));
++                                   atomic_read_unchecked(&set->set_success));
+       }
+       lov_put_reqset(set);
+       return rc;
+@@ -654,7 +654,7 @@ static int cb_statfs_update(void *cookie, int rc)
+       lov = &lovobd->u.lov;
+       osfs = set->set_oi->oi_osfs;
+       lov_sfs = oinfo->oi_osfs;
+-      success = atomic_read(&set->set_success);
++      success = atomic_read_unchecked(&set->set_success);
+       /* XXX: the same is done in lov_update_common_set, however
+        * lovset->set_exp is not initialized.
+        */
+@@ -682,7 +682,7 @@ out:
+       if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD &&
+           lov_set_finished(set, 0)) {
+               lov_statfs_interpret(NULL, set, set->set_count !=
+-                                   atomic_read(&set->set_success));
++                                   atomic_read_unchecked(&set->set_success));
+       }
+       return 0;
+diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+index f7b9b19..cb58105 100644
+--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
++++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+@@ -54,16 +54,20 @@ static void print_llogd_body(struct llogd_body *d)
+       CDEBUG(D_OTHER, "\tlgd_cur_offset: %#llx\n", d->lgd_cur_offset);
+ }
+-void lustre_swab_lu_fid(struct lu_fid *fid)
++void lustre_swab_lu_fid(void *_fid)
+ {
++      struct lu_fid *fid = _fid;
++
+       __swab64s(&fid->f_seq);
+       __swab32s(&fid->f_oid);
+       __swab32s(&fid->f_ver);
+ }
+ EXPORT_SYMBOL(lustre_swab_lu_fid);
+-void lustre_swab_ost_id(struct ost_id *oid)
++void lustre_swab_ost_id(void *_oid)
+ {
++      struct ost_id *oid = _oid;
++
+       if (fid_seq_is_mdt0(oid->oi.oi_seq)) {
+               __swab64s(&oid->oi.oi_id);
+               __swab64s(&oid->oi.oi_seq);
+@@ -80,8 +84,10 @@ static void lustre_swab_llog_id(struct llog_logid *log_id)
+       __swab32s(&log_id->lgl_ogen);
+ }
+-void lustre_swab_llogd_body(struct llogd_body *d)
++void lustre_swab_llogd_body(void *_d)
+ {
++      struct llogd_body *d = _d;
++
+       print_llogd_body(d);
+       lustre_swab_llog_id(&d->lgd_logid);
+       __swab32s(&d->lgd_ctxt_idx);
+@@ -94,8 +100,10 @@ void lustre_swab_llogd_body(struct llogd_body *d)
+ }
+ EXPORT_SYMBOL(lustre_swab_llogd_body);
+-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d)
++void lustre_swab_llogd_conn_body(void *_d)
+ {
++      struct llogd_conn_body *d = _d;
++
+       __swab64s(&d->lgdc_gen.mnt_cnt);
+       __swab64s(&d->lgdc_gen.conn_cnt);
+       lustre_swab_llog_id(&d->lgdc_logid);
+@@ -110,8 +118,10 @@ static void lustre_swab_ll_fid(struct ll_fid *fid)
+       __swab32s(&fid->f_type);
+ }
+-void lustre_swab_lu_seq_range(struct lu_seq_range *range)
++void lustre_swab_lu_seq_range(void *_range)
+ {
++      struct lu_seq_range *range = _range;
++
+       __swab64s(&range->lsr_start);
+       __swab64s(&range->lsr_end);
+       __swab32s(&range->lsr_index);
+@@ -290,8 +300,10 @@ static void print_llog_hdr(struct llog_log_hdr *h)
+       CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len);
+ }
+-void lustre_swab_llog_hdr(struct llog_log_hdr *h)
++void lustre_swab_llog_hdr(void *_h)
+ {
++      struct llog_log_hdr *h = _h;
++
+       print_llog_hdr(h);
+       lustre_swab_llog_rec(&h->llh_hdr);
+diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
+index 536b868..6aeaeae 100644
+--- a/drivers/staging/lustre/lustre/osc/osc_request.c
++++ b/drivers/staging/lustre/lustre/osc/osc_request.c
+@@ -208,8 +208,9 @@ static inline void osc_pack_req_body(struct ptlrpc_request *req,
+ static int osc_getattr_interpret(const struct lu_env *env,
+                                struct ptlrpc_request *req,
+-                               struct osc_async_args *aa, int rc)
++                               void *_aa, int rc)
+ {
++      struct osc_async_args *aa = _aa;
+       struct ost_body *body;
+       if (rc != 0)
+@@ -254,7 +255,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
+       osc_pack_req_body(req, oinfo);
+       ptlrpc_request_set_replen(req);
+-      req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
++      req->rq_interpret_reply = osc_getattr_interpret;
+       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+       aa = ptlrpc_req_async_args(req);
+@@ -350,8 +351,9 @@ out:
+ static int osc_setattr_interpret(const struct lu_env *env,
+                                struct ptlrpc_request *req,
+-                               struct osc_setattr_args *sa, int rc)
++                               void *_sa, int rc)
+ {
++      struct osc_setattr_args *sa = _sa;
+       struct ost_body *body;
+       if (rc != 0)
+@@ -401,8 +403,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
+               /* Do not wait for response. */
+               ptlrpcd_add_req(req);
+       } else {
+-              req->rq_interpret_reply =
+-                      (ptlrpc_interpterer_t)osc_setattr_interpret;
++              req->rq_interpret_reply = osc_setattr_interpret;
+               CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
+               sa = ptlrpc_req_async_args(req);
+@@ -545,7 +546,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+       ptlrpc_request_set_replen(req);
+-      req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
++      req->rq_interpret_reply = osc_setattr_interpret;
+       CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
+       sa = ptlrpc_req_async_args(req);
+       sa->sa_oa = oinfo->oi_oa;
+@@ -2174,8 +2175,9 @@ static int osc_enqueue_fini(struct ptlrpc_request *req,
+ static int osc_enqueue_interpret(const struct lu_env *env,
+                                struct ptlrpc_request *req,
+-                               struct osc_enqueue_args *aa, int rc)
++                               void *_aa, int rc)
+ {
++      struct osc_enqueue_args *aa = _aa;
+       struct ldlm_lock *lock;
+       struct lustre_handle *lockh = &aa->oa_lockh;
+       enum ldlm_mode mode = aa->oa_mode;
+@@ -2366,8 +2368,7 @@ no_match:
+                               aa->oa_flags = NULL;
+                       }
+-                      req->rq_interpret_reply =
+-                              (ptlrpc_interpterer_t)osc_enqueue_interpret;
++                      req->rq_interpret_reply = osc_enqueue_interpret;
+                       if (rqset == PTLRPCD_SET)
+                               ptlrpcd_add_req(req);
+                       else
+@@ -2443,8 +2444,9 @@ int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
+ static int osc_statfs_interpret(const struct lu_env *env,
+                               struct ptlrpc_request *req,
+-                              struct osc_async_args *aa, int rc)
++                              void *_aa, int rc)
+ {
++      struct osc_async_args *aa = _aa;
+       struct obd_statfs *msfs;
+       if (rc == -EBADR)
+@@ -2512,7 +2514,7 @@ static int osc_statfs_async(struct obd_export *exp,
+               req->rq_no_delay = 1;
+       }
+-      req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
++      req->rq_interpret_reply = osc_statfs_interpret;
+       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+       aa = ptlrpc_req_async_args(req);
+       aa->aa_oi = oinfo;
+diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
+index ab5d851..12e23dd 100644
+--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
++++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
+@@ -781,8 +781,8 @@ struct req_capsule;
+       .rmf_name    = (name),                                  \
+       .rmf_flags   = (flags),                                 \
+       .rmf_size    = (size),                                  \
+-      .rmf_swabber = (void (*)(void *))(swabber),             \
+-      .rmf_dumper  = (void (*)(void *))(dumper)               \
++      .rmf_swabber = (swabber),                               \
++      .rmf_dumper  = (dumper)                                 \
+ }
+ struct req_msg_field RMF_GENERIC_DATA =
+@@ -1889,8 +1889,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
+       msg = __req_msg(pill, loc);
+       LASSERT(msg);
+-      getter = (field->rmf_flags & RMF_F_STRING) ?
+-              (typeof(getter))lustre_msg_string : lustre_msg_buf;
++      getter = (field->rmf_flags & RMF_F_STRING) ?  lustre_msg_string : lustre_msg_buf;
+       if (field->rmf_flags & RMF_F_STRUCT_ARRAY) {
+               /*
+diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+index b514f18..dd4b44f 100644
+--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
++++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+@@ -689,7 +689,7 @@ int lustre_msg_bufcount(struct lustre_msg *m)
+ }
+ EXPORT_SYMBOL(lustre_msg_bufcount);
+-char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
++void *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
+ {
+       /* max_len == 0 means the string should fill the buffer */
+       char *str;
+@@ -1461,8 +1461,10 @@ EXPORT_SYMBOL(do_set_info_async);
+ /* byte flipping routines for all wire types declared in
+  * lustre_idl.h implemented here.
+  */
+-void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
++void lustre_swab_ptlrpc_body(void *_b)
+ {
++      struct ptlrpc_body *b = _b;
++
+       __swab32s(&b->pb_type);
+       __swab32s(&b->pb_version);
+       __swab32s(&b->pb_opc);
+@@ -1493,8 +1495,10 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
+ }
+ EXPORT_SYMBOL(lustre_swab_ptlrpc_body);
+-void lustre_swab_connect(struct obd_connect_data *ocd)
++void lustre_swab_connect(void *_ocd)
+ {
++      struct obd_connect_data *ocd = _ocd;
++
+       __swab64s(&ocd->ocd_connect_flags);
+       __swab32s(&ocd->ocd_version);
+       __swab32s(&ocd->ocd_grant);
+@@ -1568,8 +1572,10 @@ static void lustre_swab_obdo(struct obdo *o)
+       CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
+ }
+-void lustre_swab_obd_statfs(struct obd_statfs *os)
++void lustre_swab_obd_statfs(void *_os)
+ {
++      struct obd_statfs *os = _os;
++
+       __swab64s(&os->os_type);
+       __swab64s(&os->os_blocks);
+       __swab64s(&os->os_bfree);
+@@ -1593,42 +1599,54 @@ void lustre_swab_obd_statfs(struct obd_statfs *os)
+ }
+ EXPORT_SYMBOL(lustre_swab_obd_statfs);
+-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
++void lustre_swab_obd_ioobj(void *_ioo)
+ {
++      struct obd_ioobj *ioo = _ioo;
++
+       lustre_swab_ost_id(&ioo->ioo_oid);
+       __swab32s(&ioo->ioo_max_brw);
+       __swab32s(&ioo->ioo_bufcnt);
+ }
+ EXPORT_SYMBOL(lustre_swab_obd_ioobj);
+-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
++void lustre_swab_niobuf_remote(void *_nbr)
+ {
++      struct niobuf_remote *nbr = _nbr;
++
+       __swab64s(&nbr->offset);
+       __swab32s(&nbr->len);
+       __swab32s(&nbr->flags);
+ }
+ EXPORT_SYMBOL(lustre_swab_niobuf_remote);
+-void lustre_swab_ost_body(struct ost_body *b)
++void lustre_swab_ost_body(void *_b)
+ {
++      struct ost_body *b = _b;
++
+       lustre_swab_obdo(&b->oa);
+ }
+ EXPORT_SYMBOL(lustre_swab_ost_body);
+-void lustre_swab_ost_last_id(u64 *id)
++void lustre_swab_ost_last_id(void *_id)
+ {
++      u64 *id = _id;
++
+       __swab64s(id);
+ }
+ EXPORT_SYMBOL(lustre_swab_ost_last_id);
+-void lustre_swab_generic_32s(__u32 *val)
++void lustre_swab_generic_32s(void *_val)
+ {
++      __u32 *val = _val;
++
+       __swab32s(val);
+ }
+ EXPORT_SYMBOL(lustre_swab_generic_32s);
+-void lustre_swab_gl_desc(union ldlm_gl_desc *desc)
++void lustre_swab_gl_desc(void *_desc)
+ {
++      union ldlm_gl_desc *desc = _desc;
++
+       lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid);
+       __swab64s(&desc->lquota_desc.gl_flags);
+       __swab64s(&desc->lquota_desc.gl_ver);
+@@ -1672,8 +1690,10 @@ void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
+ }
+ EXPORT_SYMBOL(lustre_swab_lquota_lvb);
+-void lustre_swab_mdt_body(struct mdt_body *b)
++void lustre_swab_mdt_body(void *_b)
+ {
++      struct mdt_body *b = _b;
++
+       lustre_swab_lu_fid(&b->fid1);
+       lustre_swab_lu_fid(&b->fid2);
+       /* handle is opaque */
+@@ -1706,8 +1726,10 @@ void lustre_swab_mdt_body(struct mdt_body *b)
+ }
+ EXPORT_SYMBOL(lustre_swab_mdt_body);
+-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
++void lustre_swab_mdt_ioepoch(void *_b)
+ {
++      struct mdt_ioepoch *b = _b;
++
+       /* handle is opaque */
+        __swab64s(&b->ioepoch);
+        __swab32s(&b->flags);
+@@ -1715,8 +1737,9 @@ void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
+ }
+ EXPORT_SYMBOL(lustre_swab_mdt_ioepoch);
+-void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
++void lustre_swab_mgs_target_info(void *_mti)
+ {
++      struct mgs_target_info *mti = _mti;
+       int i;
+       __swab32s(&mti->mti_lustre_ver);
+@@ -1754,16 +1777,20 @@ void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
+ }
+ EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
+-void lustre_swab_mgs_config_body(struct mgs_config_body *body)
++void lustre_swab_mgs_config_body(void *_body)
+ {
++      struct mgs_config_body *body = _body;
++
+       __swab64s(&body->mcb_offset);
+       __swab32s(&body->mcb_units);
+       __swab16s(&body->mcb_type);
+ }
+ EXPORT_SYMBOL(lustre_swab_mgs_config_body);
+-void lustre_swab_mgs_config_res(struct mgs_config_res *body)
++void lustre_swab_mgs_config_res(void *_body)
+ {
++      struct mgs_config_res *body = _body;
++
+       __swab64s(&body->mcr_offset);
+       __swab64s(&body->mcr_size);
+ }
+@@ -1791,8 +1818,10 @@ static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
+       CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
+ }
+-void lustre_swab_obd_quotactl(struct obd_quotactl *q)
++void lustre_swab_obd_quotactl(void *_q)
+ {
++      struct obd_quotactl *q = _q;
++
+       __swab32s(&q->qc_cmd);
+       __swab32s(&q->qc_type);
+       __swab32s(&q->qc_id);
+@@ -1820,8 +1849,9 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
+       __swab32s(&fm_extent->fe_device);
+ }
+-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
++void lustre_swab_fiemap(void *_fiemap)
+ {
++      struct ll_user_fiemap *fiemap = _fiemap;
+       int i;
+       __swab64s(&fiemap->fm_start);
+@@ -1836,8 +1866,10 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
+ }
+ EXPORT_SYMBOL(lustre_swab_fiemap);
+-void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
++void lustre_swab_mdt_rec_reint (void *_rr)
+ {
++      struct mdt_rec_reint *rr = _rr;
++
+       __swab32s(&rr->rr_opcode);
+       __swab32s(&rr->rr_cap);
+       __swab32s(&rr->rr_fsuid);
+@@ -1969,8 +2001,10 @@ static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
+       __swab32s(&d->l_flock.lfw_pid);
+ }
+-void lustre_swab_ldlm_intent(struct ldlm_intent *i)
++void lustre_swab_ldlm_intent(void *_i)
+ {
++      struct ldlm_intent *i = _i;
++
+       __swab64s(&i->opc);
+ }
+ EXPORT_SYMBOL(lustre_swab_ldlm_intent);
+@@ -1990,8 +2024,10 @@ static void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
+       lustre_swab_ldlm_policy_data(&l->l_policy_data);
+ }
+-void lustre_swab_ldlm_request(struct ldlm_request *rq)
++void lustre_swab_ldlm_request(void *_rq)
+ {
++      struct ldlm_request *rq = _rq;
++
+       __swab32s(&rq->lock_flags);
+       lustre_swab_ldlm_lock_desc(&rq->lock_desc);
+       __swab32s(&rq->lock_count);
+@@ -1999,8 +2035,10 @@ void lustre_swab_ldlm_request(struct ldlm_request *rq)
+ }
+ EXPORT_SYMBOL(lustre_swab_ldlm_request);
+-void lustre_swab_ldlm_reply(struct ldlm_reply *r)
++void lustre_swab_ldlm_reply(void *_r)
+ {
++      struct ldlm_reply *r = _r;
++
+       __swab32s(&r->lock_flags);
+       CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
+       lustre_swab_ldlm_lock_desc(&r->lock_desc);
+@@ -2011,8 +2049,10 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r)
+ EXPORT_SYMBOL(lustre_swab_ldlm_reply);
+ /* Dump functions */
+-void dump_ioo(struct obd_ioobj *ioo)
++void dump_ioo(void *_ioo)
+ {
++      struct obd_ioobj *ioo = _ioo;
++
+       CDEBUG(D_RPCTRACE,
+              "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n",
+              POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
+@@ -2020,8 +2060,10 @@ void dump_ioo(struct obd_ioobj *ioo)
+ }
+ EXPORT_SYMBOL(dump_ioo);
+-void dump_rniobuf(struct niobuf_remote *nb)
++void dump_rniobuf(void *_nb)
+ {
++      struct niobuf_remote *nb = _nb;
++
+       CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
+              nb->offset, nb->len, nb->flags);
+ }
+@@ -2089,14 +2131,18 @@ static void dump_obdo(struct obdo *oa)
+               CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
+ }
+-void dump_ost_body(struct ost_body *ob)
++void dump_ost_body(void *_ob)
+ {
++      struct ost_body *ob = _ob;
++
+       dump_obdo(&ob->oa);
+ }
+ EXPORT_SYMBOL(dump_ost_body);
+-void dump_rcs(__u32 *rc)
++void dump_rcs(void *_rc)
+ {
++      __u32 *rc = _rc;
++
+       CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
+ }
+ EXPORT_SYMBOL(dump_rcs);
+@@ -2173,8 +2219,10 @@ void _debug_req(struct ptlrpc_request *req,
+ }
+ EXPORT_SYMBOL(_debug_req);
+-void lustre_swab_lustre_capa(struct lustre_capa *c)
++void lustre_swab_lustre_capa(void *_c)
+ {
++      struct lustre_capa *c = _c;
++
+       lustre_swab_lu_fid(&c->lc_fid);
+       __swab64s(&c->lc_opc);
+       __swab64s(&c->lc_uid);
+@@ -2186,15 +2234,19 @@ void lustre_swab_lustre_capa(struct lustre_capa *c)
+ }
+ EXPORT_SYMBOL(lustre_swab_lustre_capa);
+-void lustre_swab_hsm_user_state(struct hsm_user_state *state)
++void lustre_swab_hsm_user_state(void *_state)
+ {
++      struct hsm_user_state *state = _state;
++
+       __swab32s(&state->hus_states);
+       __swab32s(&state->hus_archive_id);
+ }
+ EXPORT_SYMBOL(lustre_swab_hsm_user_state);
+-void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
++void lustre_swab_hsm_state_set(void *_hss)
+ {
++      struct hsm_state_set *hss = _hss;
++
+       __swab32s(&hss->hss_valid);
+       __swab64s(&hss->hss_setmask);
+       __swab64s(&hss->hss_clearmask);
+@@ -2208,23 +2260,29 @@ static void lustre_swab_hsm_extent(struct hsm_extent *extent)
+       __swab64s(&extent->length);
+ }
+-void lustre_swab_hsm_current_action(struct hsm_current_action *action)
++void lustre_swab_hsm_current_action(void *_action)
+ {
++      struct hsm_current_action *action = _action;
++
+       __swab32s(&action->hca_state);
+       __swab32s(&action->hca_action);
+       lustre_swab_hsm_extent(&action->hca_location);
+ }
+ EXPORT_SYMBOL(lustre_swab_hsm_current_action);
+-void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
++void lustre_swab_hsm_user_item(void *_hui)
+ {
++      struct hsm_user_item *hui = _hui;
++
+       lustre_swab_lu_fid(&hui->hui_fid);
+       lustre_swab_hsm_extent(&hui->hui_extent);
+ }
+ EXPORT_SYMBOL(lustre_swab_hsm_user_item);
+-void lustre_swab_layout_intent(struct layout_intent *li)
++void lustre_swab_layout_intent(void *_li)
+ {
++      struct layout_intent *li = _li;
++
+       __swab32s(&li->li_opc);
+       __swab32s(&li->li_flags);
+       __swab64s(&li->li_start);
+@@ -2232,8 +2290,10 @@ void lustre_swab_layout_intent(struct layout_intent *li)
+ }
+ EXPORT_SYMBOL(lustre_swab_layout_intent);
+-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
++void lustre_swab_hsm_progress_kernel(void *_hpk)
+ {
++      struct hsm_progress_kernel *hpk = _hpk;
++
+       lustre_swab_lu_fid(&hpk->hpk_fid);
+       __swab64s(&hpk->hpk_cookie);
+       __swab64s(&hpk->hpk_extent.offset);
+@@ -2243,8 +2303,10 @@ void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
+ }
+ EXPORT_SYMBOL(lustre_swab_hsm_progress_kernel);
+-void lustre_swab_hsm_request(struct hsm_request *hr)
++void lustre_swab_hsm_request(void *_hr)
+ {
++      struct hsm_request *hr = _hr;
++
+       __swab32s(&hr->hr_action);
+       __swab32s(&hr->hr_archive_id);
+       __swab64s(&hr->hr_flags);
+@@ -2253,14 +2315,18 @@ void lustre_swab_hsm_request(struct hsm_request *hr)
+ }
+ EXPORT_SYMBOL(lustre_swab_hsm_request);
+-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
++void lustre_swab_swap_layouts(void *_msl)
+ {
++      struct mdc_swap_layouts *msl = _msl;
++
+       __swab64s(&msl->msl_flags);
+ }
+ EXPORT_SYMBOL(lustre_swab_swap_layouts);
+-void lustre_swab_close_data(struct close_data *cd)
++void lustre_swab_close_data(void *_cd)
+ {
++      struct close_data *cd = _cd;
++
+       lustre_swab_lu_fid(&cd->cd_fid);
+       __swab64s(&cd->cd_data_version);
+ }
+diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+index 7f32b39..e24cff3 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
++++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+@@ -3978,7 +3978,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
+               _12M_RATE_, _24M_RATE_, 0xff,
+       };
+-      atomic_set(&pmlmeext->event_seq, 0);
++      atomic_set_unchecked(&pmlmeext->event_seq, 0);
+       pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
+       pmlmeext->cur_channel = padapter->registrypriv.channel;
+@@ -4171,7 +4171,7 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
+ static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
+ {
+-      u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
++      static const u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       u8 *pframe = precv_frame->rx_data;
+       if (ptable->func) {
+@@ -4190,7 +4190,7 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
+ #ifdef CONFIG_88EU_AP_MODE
+       struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ #endif /* CONFIG_88EU_AP_MODE */
+-      u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
++      static const u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       u8 *pframe = precv_frame->rx_data;
+       struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+@@ -4215,7 +4215,7 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
+       index = GetFrameSubType(pframe) >> 4;
+-      if (index > 13) {
++      if (index > ARRAY_SIZE(mlme_sta_tbl)) {
+               RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type=%d\n", index));
+               return;
+       }
+@@ -4305,7 +4305,7 @@ void report_survey_event(struct adapter *padapter,
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct survey_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+@@ -4355,7 +4355,7 @@ void report_surveydone_event(struct adapter *padapter)
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+       psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+@@ -4399,7 +4399,7 @@ void report_join_res(struct adapter *padapter, int res)
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+       memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+@@ -4450,7 +4450,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct stadel_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+       memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+@@ -4503,7 +4503,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+       memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+index 255d6f2..52553d3 100644
+--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
++++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+@@ -30,7 +30,7 @@ int  rtl8188eu_init_recv_priv(struct adapter *padapter)
+       struct recv_buf *precvbuf;
+       tasklet_init(&precvpriv->recv_tasklet,
+-                   (void(*)(unsigned long))rtl8188eu_recv_tasklet,
++                   rtl8188eu_recv_tasklet,
+                    (unsigned long)padapter);
+       /* init recv_buf */
+diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+index ec21d8c..1c2e09c 100644
+--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
++++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+@@ -26,7 +26,7 @@ s32  rtl8188eu_init_xmit_priv(struct adapter *adapt)
+       struct xmit_priv        *pxmitpriv = &adapt->xmitpriv;
+       tasklet_init(&pxmitpriv->xmit_tasklet,
+-                   (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
++                   rtl8188eu_xmit_tasklet,
+                    (unsigned long)adapt);
+       return _SUCCESS;
+ }
+diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+index 8990748..7727f804 100644
+--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
++++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+@@ -200,17 +200,9 @@ void PHY_GetTxPowerLevel8188E(struct adapter *adapter, u32 *powerlevel);
+ void PHY_ScanOperationBackup8188E(struct adapter *Adapter, u8 Operation);
+-/*  Call after initialization */
+-void ChkFwCmdIoDone(struct adapter *adapter);
+-
+ /*  BB/MAC/RF other monitor API */
+ void PHY_SetRFPathSwitch_8188E(struct adapter *adapter,       bool main);
+-void PHY_SwitchEphyParameter(struct adapter *adapter);
+-
+-void PHY_EnableHostClkReq(struct adapter *adapter);
+-
+-bool SetAntennaConfig92C(struct adapter *adapter, u8 defaultant);
+ /*--------------------------Exported Function prototype---------------------*/
+diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
+index eaf939b..356437b 100644
+--- a/drivers/staging/rtl8188eu/include/hal_intf.h
++++ b/drivers/staging/rtl8188eu/include/hal_intf.h
+@@ -212,7 +212,7 @@ struct hal_ops {
+       void (*hal_notch_filter)(struct adapter *adapter, bool enable);
+       void (*hal_reset_security_engine)(struct adapter *adapter);
+-};
++} __no_const;
+ enum rt_eeprom_type {
+       EEPROM_93C46,
+@@ -246,7 +246,6 @@ void rtw_hal_sw_led_deinit(struct adapter *padapter);
+ u32 rtw_hal_power_on(struct adapter *padapter);
+ uint rtw_hal_init(struct adapter *padapter);
+ uint rtw_hal_deinit(struct adapter *padapter);
+-void rtw_hal_stop(struct adapter *padapter);
+ void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+ void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+@@ -275,8 +274,6 @@ void       rtw_hal_free_recv_priv(struct adapter *padapter);
+ void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
+ void  rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg, u8 level);
+-void  rtw_hal_clone_data(struct adapter *dst_adapt,
+-                         struct adapter *src_adapt);
+ void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
+diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
+index 9e5fe17..bdb77bb 100644
+--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
++++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
+@@ -70,7 +70,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm);
+ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
+ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm);
+ void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm);
+-void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext);
++void odm_SwAntDivChkAntSwitchCallback(unsigned long FunctionContext);
+ void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
+ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
+diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
+index cad3158..a1ca486 100644
+--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
++++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
+@@ -30,7 +30,6 @@ void rtw_recv_returnpacket(struct  net_device *cnxt, struct sk_buff *retpkt);
+ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
+-int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+ void rtw_free_recv_priv(struct recv_priv *precvpriv);
+ void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
+diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+index 54048bc..e86fdf4 100644
+--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
++++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+@@ -54,7 +54,7 @@ enum rx_packet_type {
+ s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
+ void rtl8188eu_free_recv_priv(struct adapter *padapter);
+ void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
+-void rtl8188eu_recv_tasklet(void *priv);
++void rtl8188eu_recv_tasklet(unsigned long _priv);
+ void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
+ void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
+ void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
+diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+index 65a63df..171cfed 100644
+--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
++++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+@@ -158,7 +158,7 @@ s32 rtl8188eu_hal_xmit(struct adapter *padapter, struct xmit_frame *frame);
+ s32 rtl8188eu_mgnt_xmit(struct adapter *padapter, struct xmit_frame *frame);
+ s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
+ #define hal_xmit_handler rtl8188eu_xmit_buf_handler
+-void rtl8188eu_xmit_tasklet(void *priv);
++void rtl8188eu_xmit_tasklet(unsigned long _priv);
+ s32 rtl8188eu_xmitframe_complete(struct adapter *padapter,
+                                struct xmit_priv *pxmitpriv,
+                                struct xmit_buf *pxmitbuf);
+diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
+index 08ca592..0eeed5d 100644
+--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
++++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
+@@ -368,7 +368,6 @@ void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+ void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+ void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
+-void rtw_getrttbl_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+ struct _cmd_callback {
+       u32     cmd_code;
+diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+index 5dd7384..337cc49 100644
+--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
++++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+@@ -116,10 +116,4 @@ struct eeprom_priv {
+       u8              efuse_eeprom_data[HWSET_MAX_SIZE_512];
+ };
+-void eeprom_write16(struct adapter *padapter, u16 reg, u16 data);
+-u16 eeprom_read16(struct adapter *padapter, u16 reg);
+-void read_eeprom_content(struct adapter *padapter);
+-void eeprom_read_sz(struct adapter *adapt, u16 reg, u8 *data, u32 sz);
+-void read_eeprom_content_by_attrib(struct adapter *padapter);
+-
+ #endif  /* __RTL871X_EEPROM_H__ */
+diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+index 3a652df..4b3ac6b 100644
+--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
++++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+@@ -103,13 +103,4 @@ static int oid_null_function(struct oid_par_priv *poid_par_priv) {
+ extern struct iw_handler_def  rtw_handlers_def;
+-int drv_query_info(struct  net_device *miniportadaptercontext, NDIS_OID oid,
+-                 void *informationbuffer, u32 informationbufferlength,
+-                 u32 *byteswritten, u32 *bytesneeded);
+-
+-int drv_set_info(struct  net_device *MiniportAdapterContext,
+-               NDIS_OID oid, void *informationbuffer,
+-               u32 informationbufferlength, u32 *bytesread,
+-               u32 *bytesneeded);
+-
+ #endif /*  #ifndef __INC_CEINFO_ */
+diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+index 27382ff..851aeb0 100644
+--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
++++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+@@ -404,7 +404,7 @@ struct p2p_oper_class_map {
+ struct mlme_ext_priv {
+       struct adapter  *padapter;
+       u8      mlmeext_init;
+-      atomic_t        event_seq;
++      atomic_unchecked_t      event_seq;
+       u16     mgnt_seq;
+       unsigned char   cur_channel;
+@@ -550,8 +550,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
+ void beacon_timing_control(struct adapter *padapter);
+ u8 set_tx_beacon_cmd(struct adapter *padapter);
+-unsigned int setup_beacon_frame(struct adapter *padapter,
+-                              unsigned char *beacon_frame);
+ void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
+ void update_mgntframe_attrib(struct adapter *padapter,
+                            struct pkt_attrib *pattrib);
+@@ -599,12 +597,6 @@ struct cmd_hdl {
+       u8 (*h2cfuns)(struct adapter  *padapter, u8 *pbuf);
+ };
+-u8 read_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 write_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf);
+@@ -613,8 +605,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 setauth_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 setkey_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 set_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+-u8 del_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+ u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf);
+ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf);
+diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
+index f96ca6a..104d496 100644
+--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
++++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
+@@ -35,7 +35,7 @@ struct sta_xmit_priv;
+ struct xmit_frame;
+ struct xmit_buf;
+-int rtw_xmit_entry(struct sk_buff *pkt, struct  net_device *pnetdev);
++netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct  net_device *pnetdev);
+ void rtw_os_xmit_schedule(struct adapter *padapter);
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+index ce1e1a1..315c3e1 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+@@ -810,10 +810,10 @@ void usb_write_port_cancel(struct adapter *padapter)
+       }
+ }
+-void rtl8188eu_recv_tasklet(void *priv)
++void rtl8188eu_recv_tasklet(unsigned long priv)
+ {
+       struct sk_buff *pskb;
+-      struct adapter *adapt = priv;
++      struct adapter *adapt = (struct adapter *)priv;
+       struct recv_priv *precvpriv = &adapt->recvpriv;
+       while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
+@@ -829,10 +829,10 @@ void rtl8188eu_recv_tasklet(void *priv)
+       }
+ }
+-void rtl8188eu_xmit_tasklet(void *priv)
++void rtl8188eu_xmit_tasklet(unsigned long priv)
+ {
+       int ret = false;
+-      struct adapter *adapt = priv;
++      struct adapter *adapt = (struct adapter *)priv;
+       struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+       if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
+diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+index 221e275..bc552c9 100644
+--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+@@ -208,7 +208,7 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
+ }
+-int rtw_xmit_entry(struct sk_buff *pkt, struct  net_device *pnetdev)
++netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct  net_device *pnetdev)
+ {
+       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index 13a5ddc..8a876d9 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -84,7 +84,7 @@ static struct pci_driver rtl8192_pci_driver = {
+ };
+ static short _rtl92e_is_tx_queue_empty(struct net_device *dev);
+-static void _rtl92e_watchdog_wq_cb(void *data);
++static void _rtl92e_watchdog_wq_cb(struct work_struct *data);
+ static void _rtl92e_watchdog_timer_cb(unsigned long data);
+ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+                                  int rate);
+@@ -92,13 +92,13 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb);
+ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb);
+ static short _rtl92e_pci_initdescring(struct net_device *dev);
+-static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv);
+-static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv);
++static void _rtl92e_irq_tx_tasklet(unsigned long priv);
++static void _rtl92e_irq_rx_tasklet(unsigned long priv);
+ static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv);
+ static int _rtl92e_up(struct net_device *dev, bool is_silent_reset);
+ static int _rtl92e_try_up(struct net_device *dev);
+ static int _rtl92e_down(struct net_device *dev, bool shutdownrf);
+-static void _rtl92e_restart(void *data);
++static void _rtl92e_restart(struct work_struct *data);
+ /****************************************************************************
+    -----------------------------IO STUFF-------------------------
+@@ -375,7 +375,7 @@ static struct rtllib_qos_parameters def_qos_parameters = {
+       {0, 0, 0, 0}
+ };
+-static void _rtl92e_update_beacon(void *data)
++static void _rtl92e_update_beacon(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
+                                 update_beacon_wq.work);
+@@ -391,7 +391,7 @@ static void _rtl92e_update_beacon(void *data)
+       _rtl92e_update_cap(dev, net->capability);
+ }
+-static void _rtl92e_qos_activate(void *data)
++static void _rtl92e_qos_activate(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
+                                 qos_activate);
+@@ -527,8 +527,9 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev,
+       return 0;
+ }
+-static void _rtl92e_prepare_beacon(struct r8192_priv *priv)
++static void _rtl92e_prepare_beacon(unsigned long _priv)
+ {
++      struct r8192_priv *priv = (struct r8192_priv *)_priv;
+       struct net_device *dev = priv->rtllib->dev;
+       struct sk_buff *pskb = NULL, *pnewskb = NULL;
+       struct cb_desc *tcb_desc = NULL;
+@@ -1002,30 +1003,30 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
+ {
+       struct r8192_priv *priv = rtllib_priv(dev);
+-      INIT_WORK_RSL(&priv->reset_wq, (void *)_rtl92e_restart, dev);
+-      INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq,
++      INIT_WORK_RSL(&priv->reset_wq, _rtl92e_restart, dev);
++      INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, rtl92e_ips_leave_wq,
+                     dev);
+       INIT_DELAYED_WORK_RSL(&priv->watch_dog_wq,
+-                            (void *)_rtl92e_watchdog_wq_cb, dev);
++                            _rtl92e_watchdog_wq_cb, dev);
+       INIT_DELAYED_WORK_RSL(&priv->txpower_tracking_wq,
+-                            (void *)rtl92e_dm_txpower_tracking_wq, dev);
++                            rtl92e_dm_txpower_tracking_wq, dev);
+       INIT_DELAYED_WORK_RSL(&priv->rfpath_check_wq,
+-                            (void *)rtl92e_dm_rf_pathcheck_wq, dev);
++                            rtl92e_dm_rf_pathcheck_wq, dev);
+       INIT_DELAYED_WORK_RSL(&priv->update_beacon_wq,
+-                            (void *)_rtl92e_update_beacon, dev);
+-      INIT_WORK_RSL(&priv->qos_activate, (void *)_rtl92e_qos_activate, dev);
++                            _rtl92e_update_beacon, dev);
++      INIT_WORK_RSL(&priv->qos_activate, _rtl92e_qos_activate, dev);
+       INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq,
+-                            (void *) rtl92e_hw_wakeup_wq, dev);
++                            rtl92e_hw_wakeup_wq, dev);
+       INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
+-                            (void *) rtl92e_hw_sleep_wq, dev);
++                            rtl92e_hw_sleep_wq, dev);
+       tasklet_init(&priv->irq_rx_tasklet,
+-                   (void(*)(unsigned long))_rtl92e_irq_rx_tasklet,
++                   _rtl92e_irq_rx_tasklet,
+                    (unsigned long)priv);
+       tasklet_init(&priv->irq_tx_tasklet,
+-                   (void(*)(unsigned long))_rtl92e_irq_tx_tasklet,
++                   _rtl92e_irq_tx_tasklet,
+                    (unsigned long)priv);
+       tasklet_init(&priv->irq_prepare_beacon_tasklet,
+-                   (void(*)(unsigned long))_rtl92e_prepare_beacon,
++                   _rtl92e_prepare_beacon,
+                    (unsigned long)priv);
+ }
+@@ -1377,7 +1378,7 @@ static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+       }
+ }
+-static void _rtl92e_watchdog_wq_cb(void *data)
++static void _rtl92e_watchdog_wq_cb(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_dwork_rsl(data,
+                                 struct r8192_priv, watch_dog_wq);
+@@ -2142,13 +2143,15 @@ static void _rtl92e_tx_resume(struct net_device *dev)
+       }
+ }
+-static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv)
++static void _rtl92e_irq_tx_tasklet(unsigned long _priv)
+ {
++      struct r8192_priv *priv = (struct r8192_priv *)_priv;
+       _rtl92e_tx_resume(priv->rtllib->dev);
+ }
+-static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv)
++static void _rtl92e_irq_rx_tasklet(unsigned long _priv)
+ {
++      struct r8192_priv *priv= (struct r8192_priv *)_priv;
+       _rtl92e_rx_normal(priv->rtllib->dev);
+       rtl92e_writel(priv->rtllib->dev, INTA_MASK,
+@@ -2236,7 +2239,7 @@ void rtl92e_commit(struct net_device *dev)
+       _rtl92e_up(dev, false);
+ }
+-static void _rtl92e_restart(void *data)
++static void _rtl92e_restart(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
+                                 reset_wq);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+index f627fdc..3ad70fb 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+@@ -586,12 +586,12 @@ void force_pci_posting(struct net_device *dev);
+ void rtl92e_rx_enable(struct net_device *);
+ void rtl92e_tx_enable(struct net_device *);
+-void rtl92e_hw_sleep_wq(void *data);
++void rtl92e_hw_sleep_wq(struct work_struct *data);
+ void rtl92e_commit(struct net_device *dev);
+ void rtl92e_check_rfctrl_gpio_timer(unsigned long data);
+-void rtl92e_hw_wakeup_wq(void *data);
++void rtl92e_hw_wakeup_wq(struct work_struct *data);
+ void rtl92e_reset_desc_ring(struct net_device *dev);
+ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+index 9bc2848..17ccbf7 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+@@ -195,7 +195,7 @@ static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
+ static        void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
+ static  void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
+ static void _rtl92e_dm_check_fsync(struct net_device *dev);
+-static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
++static void _rtl92e_dm_check_rf_ctrl_gpio(struct work_struct *data);
+ static void _rtl92e_dm_fsync_timer_callback(unsigned long data);
+ /*---------------------Define local function prototype-----------------------*/
+@@ -229,7 +229,7 @@ void rtl92e_dm_init(struct net_device *dev)
+               _rtl92e_dm_init_wa_broadcom_iot(dev);
+       INIT_DELAYED_WORK_RSL(&priv->gpio_change_rf_wq,
+-                            (void *)_rtl92e_dm_check_rf_ctrl_gpio, dev);
++                            _rtl92e_dm_check_rf_ctrl_gpio, dev);
+ }
+ void rtl92e_dm_deinit(struct net_device *dev)
+@@ -932,7 +932,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
+       priv->txpower_count = 0;
+ }
+-void rtl92e_dm_txpower_tracking_wq(void *data)
++void rtl92e_dm_txpower_tracking_wq(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_dwork_rsl(data,
+                                 struct r8192_priv, txpower_tracking_wq);
+@@ -1814,7 +1814,7 @@ static void _rtl92e_dm_init_wa_broadcom_iot(struct net_device *dev)
+       pHTInfo->WAIotTH = WAIotTHVal;
+ }
+-static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
++static void _rtl92e_dm_check_rf_ctrl_gpio(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_dwork_rsl(data,
+                                 struct r8192_priv, gpio_change_rf_wq);
+@@ -1868,7 +1868,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
+       }
+ }
+-void rtl92e_dm_rf_pathcheck_wq(void *data)
++void rtl92e_dm_rf_pathcheck_wq(struct work_struct *data)
+ {
+       struct r8192_priv *priv = container_of_dwork_rsl(data,
+                                 struct r8192_priv,
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+index 756a0dd..d2de5e8 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+@@ -191,13 +191,13 @@ void rtl92e_dm_watchdog(struct net_device *dev);
+ void    rtl92e_init_adaptive_rate(struct net_device *dev);
+-void    rtl92e_dm_txpower_tracking_wq(void *data);
++void    rtl92e_dm_txpower_tracking_wq(struct work_struct *data);
+ void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+ void    rtl92e_dm_restore_state(struct net_device *dev);
+ void    rtl92e_dm_backup_state(struct net_device *dev);
+ void    rtl92e_dm_init_edca_turbo(struct net_device *dev);
+-void    rtl92e_dm_rf_pathcheck_wq(void *data);
++void    rtl92e_dm_rf_pathcheck_wq(struct work_struct *data);
+ void rtl92e_dm_init_txpower_tracking(struct net_device *dev);
+ #endif        /*__R8192UDM_H__ */
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+index 98e4d88..5216a5f 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+@@ -44,7 +44,7 @@ static void _rtl92e_hw_sleep(struct net_device *dev)
+       rtl92e_set_rf_state(dev, eRfSleep, RF_CHANGE_BY_PS);
+ }
+-void rtl92e_hw_sleep_wq(void *data)
++void rtl92e_hw_sleep_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device, hw_sleep_wq);
+@@ -72,7 +72,7 @@ void rtl92e_hw_wakeup(struct net_device *dev)
+       rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_PS);
+ }
+-void rtl92e_hw_wakeup_wq(void *data)
++void rtl92e_hw_wakeup_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device, hw_wakeup_wq);
+@@ -172,7 +172,7 @@ void rtl92e_ips_leave(struct net_device *dev)
+       }
+ }
+-void rtl92e_ips_leave_wq(void *data)
++void rtl92e_ips_leave_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_work_rsl(data,
+                                    struct rtllib_device, ips_leave_wq);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+index a46f4cf..8f46fda 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+@@ -24,6 +24,7 @@
+ #include <linux/types.h>
+ struct net_device;
++struct work_struct;
+ #define RT_CHECK_FOR_HANG_PERIOD 2
+@@ -31,7 +32,7 @@ void rtl92e_hw_wakeup(struct net_device *dev);
+ void rtl92e_enter_sleep(struct net_device *dev, u64 time);
+ void rtl92e_rtllib_ips_leave_wq(struct net_device *dev);
+ void rtl92e_rtllib_ips_leave(struct net_device *dev);
+-void rtl92e_ips_leave_wq(void *data);
++void rtl92e_ips_leave_wq(struct work_struct *data);
+ void rtl92e_ips_enter(struct net_device *dev);
+ void rtl92e_ips_leave(struct net_device *dev);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+index 70df6a1..21c9f2e 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+@@ -1187,30 +1187,30 @@ static const struct iw_priv_args r8192_private_args[] = {
+ };
+ static iw_handler r8192_private_handler[] = {
+-      (iw_handler)_rtl92e_wx_set_debug,   /*SIOCIWSECONDPRIV*/
+-      (iw_handler)_rtl92e_wx_set_scan_type,
+-      (iw_handler)_rtl92e_wx_set_rawtx,
+-      (iw_handler)_rtl92e_wx_force_reset,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)_rtl92e_wx_adapter_power_status,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)_rtl92e_wx_set_lps_awake_interval,
+-      (iw_handler)_rtl92e_wx_set_force_lps,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)NULL,
+-      (iw_handler)_rtl92e_wx_set_promisc_mode,
+-      (iw_handler)_rtl92e_wx_get_promisc_mode,
++      _rtl92e_wx_set_debug,   /*SIOCIWSECONDPRIV*/
++      _rtl92e_wx_set_scan_type,
++      _rtl92e_wx_set_rawtx,
++      _rtl92e_wx_force_reset,
++      NULL,
++      NULL,
++      _rtl92e_wx_adapter_power_status,
++      NULL,
++      NULL,
++      NULL,
++      _rtl92e_wx_set_lps_awake_interval,
++      _rtl92e_wx_set_force_lps,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      NULL,
++      _rtl92e_wx_set_promisc_mode,
++      _rtl92e_wx_get_promisc_mode,
+ };
+ static struct iw_statistics *_rtl92e_get_wireless_stats(struct net_device *dev)
+diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
+index 776e179..5a021e6 100644
+--- a/drivers/staging/rtl8192e/rtllib.h
++++ b/drivers/staging/rtl8192e/rtllib.h
+@@ -1993,7 +1993,7 @@ int rtllib_encrypt_fragment(
+       struct sk_buff *frag,
+       int hdr_len);
+-int rtllib_xmit(struct sk_buff *skb,  struct net_device *dev);
++netdev_tx_t rtllib_xmit(struct sk_buff *skb,  struct net_device *dev);
+ void rtllib_txb_free(struct rtllib_txb *);
+ /* rtllib_rx.c */
+@@ -2107,7 +2107,7 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+ int rtllib_wx_get_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+                      union iwreq_data *wrqu, char *b);
+-void rtllib_wx_sync_scan_wq(void *data);
++void rtllib_wx_sync_scan_wq(struct work_struct *data);
+ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
+                       struct iw_request_info *info,
+diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
+index 62154e3..bf1e431 100644
+--- a/drivers/staging/rtl8192e/rtllib_softmac.c
++++ b/drivers/staging/rtl8192e/rtllib_softmac.c
+@@ -574,7 +574,7 @@ out:
+       wireless_send_event(ieee->dev, SIOCGIWSCAN, &wrqu, NULL);
+ }
+-static void rtllib_softmac_scan_wq(void *data)
++static void rtllib_softmac_scan_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device, softmac_scan_wq);
+@@ -1513,7 +1513,7 @@ static void rtllib_associate_step2(struct rtllib_device *ieee)
+       }
+ }
+-static void rtllib_associate_complete_wq(void *data)
++static void rtllib_associate_complete_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = (struct rtllib_device *)
+                                    container_of_work_rsl(data,
+@@ -1582,7 +1582,7 @@ static void rtllib_associate_complete(struct rtllib_device *ieee)
+       schedule_work(&ieee->associate_complete_wq);
+ }
+-static void rtllib_associate_procedure_wq(void *data)
++static void rtllib_associate_procedure_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device,
+@@ -2054,8 +2054,9 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
+ }
+-static inline void rtllib_sta_ps(struct rtllib_device *ieee)
++static inline void rtllib_sta_ps(unsigned long _ieee)
+ {
++      struct rtllib_device *ieee = (struct rtllib_device *)_ieee;
+       u64 time;
+       short sleep;
+       unsigned long flags, flags2;
+@@ -2576,7 +2577,7 @@ static void rtllib_start_monitor_mode(struct rtllib_device *ieee)
+       }
+ }
+-static void rtllib_start_ibss_wq(void *data)
++static void rtllib_start_ibss_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device, start_ibss_wq);
+@@ -2741,7 +2742,7 @@ static void rtllib_start_bss(struct rtllib_device *ieee)
+       spin_unlock_irqrestore(&ieee->lock, flags);
+ }
+-static void rtllib_link_change_wq(void *data)
++static void rtllib_link_change_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device, link_change_wq);
+@@ -2767,7 +2768,7 @@ void rtllib_disassociate(struct rtllib_device *ieee)
+       notify_wx_assoc_event(ieee);
+ }
+-static void rtllib_associate_retry_wq(void *data)
++static void rtllib_associate_retry_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_dwork_rsl(data,
+                                    struct rtllib_device, associate_retry_wq);
+@@ -3020,19 +3021,18 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
+                   (unsigned long) ieee);
+       INIT_DELAYED_WORK_RSL(&ieee->link_change_wq,
+-                            (void *)rtllib_link_change_wq, ieee);
++                            rtllib_link_change_wq, ieee);
+       INIT_DELAYED_WORK_RSL(&ieee->start_ibss_wq,
+-                            (void *)rtllib_start_ibss_wq, ieee);
++                            rtllib_start_ibss_wq, ieee);
+       INIT_WORK_RSL(&ieee->associate_complete_wq,
+-                    (void *)rtllib_associate_complete_wq, ieee);
++                            rtllib_associate_complete_wq, ieee);
+       INIT_DELAYED_WORK_RSL(&ieee->associate_procedure_wq,
+-                            (void *)rtllib_associate_procedure_wq, ieee);
++                            rtllib_associate_procedure_wq, ieee);
+       INIT_DELAYED_WORK_RSL(&ieee->softmac_scan_wq,
+-                            (void *)rtllib_softmac_scan_wq, ieee);
++                            rtllib_softmac_scan_wq, ieee);
+       INIT_DELAYED_WORK_RSL(&ieee->associate_retry_wq,
+-                            (void *)rtllib_associate_retry_wq, ieee);
+-      INIT_WORK_RSL(&ieee->wx_sync_scan_wq, (void *)rtllib_wx_sync_scan_wq,
+-                    ieee);
++                            rtllib_associate_retry_wq, ieee);
++      INIT_WORK_RSL(&ieee->wx_sync_scan_wq, rtllib_wx_sync_scan_wq, ieee);
+       sema_init(&ieee->wx_sem, 1);
+       sema_init(&ieee->scan_sem, 1);
+@@ -3042,7 +3042,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
+       spin_lock_init(&ieee->beacon_lock);
+       tasklet_init(&ieee->ps_task,
+-           (void(*)(unsigned long)) rtllib_sta_ps,
++           rtllib_sta_ps,
+            (unsigned long)ieee);
+ }
+diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+index 61ed8b0..a8b7d01 100644
+--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
++++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+@@ -327,7 +327,7 @@ out:
+ }
+ EXPORT_SYMBOL(rtllib_wx_set_mode);
+-void rtllib_wx_sync_scan_wq(void *data)
++void rtllib_wx_sync_scan_wq(struct work_struct *data)
+ {
+       struct rtllib_device *ieee = container_of_work_rsl(data,
+                                    struct rtllib_device, wx_sync_scan_wq);
+diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
+index 58fc70e..3fe041e 100644
+--- a/drivers/staging/rtl8192e/rtllib_tx.c
++++ b/drivers/staging/rtl8192e/rtllib_tx.c
+@@ -981,7 +981,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
+       return 1;
+ }
+-int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
++netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       memset(skb->cb, 0, sizeof(skb->cb));
+       return rtllib_xmit_inter(skb, dev);
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+index 09e9499..dc65c79 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+@@ -2174,7 +2174,7 @@ int ieee80211_set_encryption(struct ieee80211_device *ieee);
+ int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+                              struct sk_buff *frag, int hdr_len);
+-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
++netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
+ void ieee80211_txb_free(struct ieee80211_txb *);
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+index 49db1b7..8e1b69a 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+@@ -1765,9 +1765,9 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
+ }
+-static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
++static inline void ieee80211_sta_ps(unsigned long _ieee)
+ {
+-
++      struct ieee80211_device *ieee = (struct ieee80211_device *)_ieee;
+       u32 th, tl;
+       short sleep;
+@@ -2735,7 +2735,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
+       spin_lock_init(&ieee->beacon_lock);
+       tasklet_init(&ieee->ps_task,
+-           (void(*)(unsigned long)) ieee80211_sta_ps,
++           ieee80211_sta_ps,
+            (unsigned long)ieee);
+ }
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+index 1ab0aea..41de55c 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+@@ -594,7 +594,7 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
+       }
+ }
+-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
++netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ieee80211_device *ieee = netdev_priv(dev);
+       struct ieee80211_txb *txb = NULL;
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index dd0970f..7fa0bdf 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -2382,7 +2382,7 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
+ static void rtl819x_watchdog_wqcallback(struct work_struct *work);
+-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
++static void rtl8192_irq_rx_tasklet(unsigned long priv);
+ /* init tasklet and wait_queue here. only 2.6 above kernel is considered */
+ #define DRV_NAME "wlan0"
+ static void rtl8192_init_priv_task(struct net_device *dev)
+@@ -2405,7 +2405,7 @@ static void rtl8192_init_priv_task(struct net_device *dev)
+       INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
+       tasklet_init(&priv->irq_rx_tasklet,
+-                   (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
++                   rtl8192_irq_rx_tasklet,
+                    (unsigned long)priv);
+ }
+@@ -4942,8 +4942,9 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
+       }
+ }
+-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
++static void rtl8192_irq_rx_tasklet(unsigned long _priv)
+ {
++      struct r8192_priv *priv = (struct r8192_priv *)_priv;
+       struct sk_buff *skb;
+       struct rtl8192_rx_info *info;
+diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
+index f25b34c..487a963 100644
+--- a/drivers/staging/rtl8712/rtl8712_recv.c
++++ b/drivers/staging/rtl8712/rtl8712_recv.c
+@@ -45,7 +45,7 @@ static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
+ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+ static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+-static void recv_tasklet(void *priv);
++static void recv_tasklet(unsigned long _priv);
+ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
+ {
+@@ -79,7 +79,7 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
+       }
+       precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
+       tasklet_init(&precvpriv->recv_tasklet,
+-           (void(*)(unsigned long))recv_tasklet,
++           recv_tasklet,
+            (unsigned long)padapter);
+       skb_queue_head_init(&precvpriv->rx_skb_queue);
+@@ -1103,7 +1103,7 @@ _exit_recvbuf2recvframe:
+       return _SUCCESS;
+ }
+-static void recv_tasklet(void *priv)
++static void recv_tasklet(unsigned long priv)
+ {
+       struct sk_buff *pskb;
+       struct _adapter *padapter = (struct _adapter *)priv;
+diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
+index 26dd24c..2eb37c9 100644
+--- a/drivers/staging/rtl8712/rtl871x_io.h
++++ b/drivers/staging/rtl8712/rtl871x_io.h
+@@ -108,7 +108,7 @@ struct     _io_ops {
+                         u8 *pmem);
+       u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+                          u8 *pmem);
+-};
++} __no_const;
+ struct io_req {
+       struct list_head list;
+diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
+index c9218be..ecda3f6 100644
+--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
++++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
+@@ -76,18 +76,4 @@ uint oid_null_function(struct oid_par_priv *poid_par_priv);
+ extern struct iw_handler_def  r871x_handlers_def;
+-uint drv_query_info(struct net_device *MiniportAdapterContext,
+-                  uint Oid,
+-                  void *InformationBuffer,
+-                  u32 InformationBufferLength,
+-                  u32 *BytesWritten,
+-                  u32 *BytesNeeded);
+-
+-uint drv_set_info(struct net_device *MiniportAdapterContext,
+-                uint Oid,
+-                void *InformationBuffer,
+-                u32 InformationBufferLength,
+-                u32 *BytesRead,
+-                u32 *BytesNeeded);
+-
+ #endif
+diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
+index 99256ba..1b789b8 100644
+--- a/drivers/staging/rtl8712/rtl871x_xmit.c
++++ b/drivers/staging/rtl8712/rtl871x_xmit.c
+@@ -152,7 +152,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+       alloc_hwxmits(padapter);
+       init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+       tasklet_init(&pxmitpriv->xmit_tasklet,
+-              (void(*)(unsigned long))r8712_xmit_bh,
++              r8712_xmit_bh,
+               (unsigned long)padapter);
+       return _SUCCESS;
+ }
+diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
+index a9633c3..77b0c85 100644
+--- a/drivers/staging/rtl8712/rtl871x_xmit.h
++++ b/drivers/staging/rtl8712/rtl871x_xmit.h
+@@ -291,7 +291,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe);
+ int r8712_xmit_enqueue(struct _adapter *padapter,
+                      struct xmit_frame *pxmitframe);
+ int r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
+-void r8712_xmit_bh(void *priv);
++void r8712_xmit_bh(unsigned long priv);
+ void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
+                       struct xmit_buf *pxmitbuf);
+diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
+index 6f1234570..3c8fb5a 100644
+--- a/drivers/staging/rtl8712/usb_ops_linux.c
++++ b/drivers/staging/rtl8712/usb_ops_linux.c
+@@ -331,10 +331,10 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter)
+       }
+ }
+-void r8712_xmit_bh(void *priv)
++void r8712_xmit_bh(unsigned long priv)
+ {
+       int ret = false;
+-      struct _adapter *padapter = priv;
++      struct _adapter *padapter = (struct _adapter *)priv;
+       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+       if (padapter->bDriverStopped ||
+diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
+index 695f9b9..5f8019a 100644
+--- a/drivers/staging/rtl8712/xmit_linux.c
++++ b/drivers/staging/rtl8712/xmit_linux.c
+@@ -156,7 +156,7 @@ void r8712_xmit_complete(struct _adapter *padapter, struct xmit_frame *pxframe)
+       pxframe->pkt = NULL;
+ }
+-int r8712_xmit_entry(_pkt *pkt, struct  net_device *pnetdev)
++netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct  net_device *pnetdev)
+ {
+       struct xmit_frame *pxmitframe = NULL;
+       struct _adapter *padapter = netdev_priv(pnetdev);
+diff --git a/drivers/staging/rtl8712/xmit_osdep.h b/drivers/staging/rtl8712/xmit_osdep.h
+index 8eba7ca..6c4ce81 100644
+--- a/drivers/staging/rtl8712/xmit_osdep.h
++++ b/drivers/staging/rtl8712/xmit_osdep.h
+@@ -46,7 +46,7 @@ struct sta_xmit_priv;
+ struct xmit_frame;
+ struct xmit_buf;
+-int r8712_xmit_entry(_pkt *pkt, struct  net_device *pnetdev);
++netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct  net_device *pnetdev);
+ void r8712_SetFilter(struct work_struct *work);
+ int r8712_xmit_resource_alloc(struct _adapter *padapter,
+                          struct xmit_buf *pxmitbuf);
+diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+index 7dd1540..52d1392 100644
+--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
++++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+@@ -368,7 +368,7 @@ static void init_mlme_ext_priv23a_value(struct rtw_adapter *padapter)
+               _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+               _12M_RATE_, _24M_RATE_, 0xff,};
+-      atomic_set(&pmlmeext->event_seq, 0);
++      atomic_set_unchecked(&pmlmeext->event_seq, 0);
+       /* reset to zero when disconnect at client mode */
+       pmlmeext->mgnt_seq = 0;
+@@ -4734,7 +4734,7 @@ void report_survey_event23a(struct rtw_adapter *padapter,
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct survey_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       psurvey_evt = (struct survey_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+@@ -4783,7 +4783,7 @@ void report_surveydone_event23a(struct rtw_adapter *padapter)
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       psurveydone_evt = (struct surveydone_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+       psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+@@ -4825,7 +4825,7 @@ void report_join_res23a(struct rtw_adapter *padapter, int res)
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+       memcpy((unsigned char *)&pjoinbss_evt->network.network,
+@@ -4873,7 +4873,7 @@ void report_del_sta_event23a(struct rtw_adapter *padapter,
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct stadel_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       pdel_sta_evt = (struct stadel_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+       ether_addr_copy((unsigned char *)&pdel_sta_evt->macaddr, MacAddr);
+@@ -4925,7 +4925,7 @@ void report_add_sta_event23a(struct rtw_adapter *padapter,
+       pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+       pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+       pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+-      pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++      pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+       padd_sta_evt = (struct stassoc_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+       ether_addr_copy((unsigned char *)&padd_sta_evt->macaddr, MacAddr);
+diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
+index 3de40cf..8213068 100644
+--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
++++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
+@@ -183,7 +183,7 @@ int _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv,
+       mutex_init(&pxmitpriv->ack_tx_mutex);
+       rtw_sctx_init23a(&pxmitpriv->ack_tx_ops, 0);
+       tasklet_init(&padapter->xmitpriv.xmit_tasklet,
+-                   (void(*)(unsigned long))rtl8723au_xmit_tasklet,
++                   rtl8723au_xmit_tasklet,
+                    (unsigned long)padapter);
+ exit:
+diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_recv.c b/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
+index 0fec84b..298d283 100644
+--- a/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
++++ b/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
+@@ -33,7 +33,7 @@ int rtl8723au_init_recv_priv(struct rtw_adapter *padapter)
+       struct sk_buff *pskb;
+       tasklet_init(&precvpriv->recv_tasklet,
+-                   (void(*)(unsigned long))rtl8723au_recv_tasklet,
++                   rtl8723au_recv_tasklet,
+                    (unsigned long)padapter);
+       precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
+index 5c81ff4..b4c2601 100644
+--- a/drivers/staging/rtl8723au/hal/usb_ops_linux.c
++++ b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
+@@ -483,7 +483,7 @@ _exit_recvbuf2recvframe:
+       return _SUCCESS;
+ }
+-void rtl8723au_recv_tasklet(void *priv)
++void rtl8723au_recv_tasklet(unsigned long priv)
+ {
+       struct sk_buff *pskb;
+       struct rtw_adapter *padapter = (struct rtw_adapter *)priv;
+@@ -658,7 +658,7 @@ int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
+       return ret;
+ }
+-void rtl8723au_xmit_tasklet(void *priv)
++void rtl8723au_xmit_tasklet(unsigned long priv)
+ {
+       int ret;
+       struct rtw_adapter *padapter = (struct rtw_adapter *)priv;
+diff --git a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
+index bcf3657..74d4742 100644
+--- a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
++++ b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
+@@ -135,7 +135,6 @@ void       PHY_SetBWMode23a8723A(struct rtw_adapter *pAdapter,
+ /*  */
+ void  PHY_SwChnl8723A(struct rtw_adapter *pAdapter, u8 channel);
+                               /*  Call after initialization */
+-void ChkFwCmdIoDone(struct rtw_adapter *Adapter);
+ /*  */
+ /*  Modify the value of the hw register when beacon interval be changed. */
+@@ -144,13 +143,6 @@ void
+ rtl8192c_PHY_SetBeaconHwReg(struct rtw_adapter *Adapter, u16 BeaconInterval);
+-void PHY_SwitchEphyParameter(struct rtw_adapter *Adapter);
+-
+-void PHY_EnableHostClkReq(struct rtw_adapter *Adapter);
+-
+-bool
+-SetAntennaConfig92C(struct rtw_adapter *Adapter, u8 DefaultAnt);
+-
+ /*--------------------------Exported Function prototype---------------------*/
+ #define PHY_SetMacReg PHY_SetBBReg
+diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
+index e83463a..84230f3 100644
+--- a/drivers/staging/rtl8723au/include/drv_types.h
++++ b/drivers/staging/rtl8723au/include/drv_types.h
+@@ -185,7 +185,7 @@ struct dvobj_priv {
+       struct usb_interface *pusbintf;
+       struct usb_device *pusbdev;
+-      atomic_t continual_urb_error;
++      atomic_unchecked_t continual_urb_error;
+ /*-------- below is for PCIE INTERFACE --------*/
+diff --git a/drivers/staging/rtl8723au/include/hal_intf.h b/drivers/staging/rtl8723au/include/hal_intf.h
+index b924d47..1e3e51c 100644
+--- a/drivers/staging/rtl8723au/include/hal_intf.h
++++ b/drivers/staging/rtl8723au/include/hal_intf.h
+@@ -97,10 +97,8 @@ int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal);
+ int rtl8723au_hal_init(struct rtw_adapter *padapter);
+ int rtl8723au_hal_deinit(struct rtw_adapter *padapter);
+-void rtw_hal_stop(struct rtw_adapter *padapter);
+ void rtw_hal_update_ra_mask23a(struct sta_info *psta, u8 rssi_level);
+-void  rtw_hal_clone_data(struct rtw_adapter *dst_padapter, struct rtw_adapter *src_padapter);
+ void hw_var_set_correct_tsf(struct rtw_adapter *padapter);
+ void hw_var_set_mlme_disconnect(struct rtw_adapter *padapter);
+diff --git a/drivers/staging/rtl8723au/include/recv_osdep.h b/drivers/staging/rtl8723au/include/recv_osdep.h
+index c2d3f1b..bb0dc02 100644
+--- a/drivers/staging/rtl8723au/include/recv_osdep.h
++++ b/drivers/staging/rtl8723au/include/recv_osdep.h
+@@ -26,7 +26,6 @@ int rtw_recv_indicatepkt23a(struct rtw_adapter *adapter, struct recv_frame *prec
+ void rtw_handle_tkip_mic_err23a(struct rtw_adapter *padapter, u8 bgroup);
+-int   rtw_init_recv_priv(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
+ void rtw_free_recv_priv (struct recv_priv *precvpriv);
+ int rtw_os_recv_resource_init(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
+diff --git a/drivers/staging/rtl8723au/include/rtw_ap.h b/drivers/staging/rtl8723au/include/rtw_ap.h
+index 55a708f..2f111af 100644
+--- a/drivers/staging/rtl8723au/include/rtw_ap.h
++++ b/drivers/staging/rtl8723au/include/rtw_ap.h
+@@ -26,8 +26,6 @@
+ void init_mlme_ap_info23a(struct rtw_adapter *padapter);
+ void free_mlme_ap_info23a(struct rtw_adapter *padapter);
+ /* void update_BCNTIM(struct rtw_adapter *padapter); */
+-void rtw_add_bcn_ie(struct rtw_adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index, u8 *data, u8 len);
+-void rtw_remove_bcn_ie(struct rtw_adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index);
+ void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx);
+ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_level);
+ void expire_timeout_chk23a(struct rtw_adapter *padapter);
+diff --git a/drivers/staging/rtl8723au/include/rtw_cmd.h b/drivers/staging/rtl8723au/include/rtw_cmd.h
+index d1fa95d..338b933 100644
+--- a/drivers/staging/rtl8723au/include/rtw_cmd.h
++++ b/drivers/staging/rtl8723au/include/rtw_cmd.h
+@@ -712,7 +712,6 @@ int rtw_ps_cmd23a(struct rtw_adapter*padapter);
+ int rtw_chk_hi_queue_cmd23a(struct rtw_adapter*padapter);
+ #endif
+-int rtw_set_chplan_cmd(struct rtw_adapter*padapter, u8 chplan, u8 enqueue);
+ int rtw_led_blink_cmd(struct rtw_adapter*padapter, struct led_8723a *pLed);
+ int rtw_set_csa_cmd(struct rtw_adapter*padapter, u8 new_ch_no);
+diff --git a/drivers/staging/rtl8723au/include/rtw_eeprom.h b/drivers/staging/rtl8723au/include/rtw_eeprom.h
+index a86f36e..8addfe7 100644
+--- a/drivers/staging/rtl8723au/include/rtw_eeprom.h
++++ b/drivers/staging/rtl8723au/include/rtw_eeprom.h
+@@ -125,11 +125,4 @@ struct eeprom_priv {
+       u8              efuse_eeprom_data[HWSET_MAX_SIZE_512]; /* 92C:256bytes, 88E:512bytes, we use union set (512bytes) */
+ };
+-void eeprom_write16(struct rtw_adapter *padapter, u16 reg, u16 data);
+-u16 eeprom_read16(struct rtw_adapter *padapter, u16 reg);
+-void read_eeprom_content(struct rtw_adapter *padapter);
+-void eeprom_read_sz(struct rtw_adapter *padapter, u16 reg, u8 *data, u32 sz);
+-
+-void read_eeprom_content_by_attrib(struct rtw_adapter *padapter);
+-
+ #endif  /* __RTL871X_EEPROM_H__ */
+diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+index 0e7d3da..4a54c4fa3 100644
+--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
++++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+@@ -406,7 +406,7 @@ struct p2p_oper_class_map {
+ struct mlme_ext_priv {
+       struct rtw_adapter      *padapter;
+       u8      mlmeext_init;
+-      atomic_t                event_seq;
++      atomic_unchecked_t      event_seq;
+       u16     mgnt_seq;
+       /* struct fw_priv       fwpriv; */
+@@ -541,8 +541,6 @@ void report_add_sta_event23a(struct rtw_adapter *padapter,
+                         unsigned char *MacAddr, int cam_idx);
+ int set_tx_beacon_cmd23a(struct rtw_adapter*padapter);
+-unsigned int setup_beacon_frame(struct rtw_adapter *padapter,
+-                              unsigned char *beacon_frame);
+ void update_mgnt_tx_rate23a(struct rtw_adapter *padapter, u8 rate);
+ void update_mgntframe_attrib23a(struct rtw_adapter *padapter,
+                            struct pkt_attrib *pattrib);
+@@ -595,14 +593,6 @@ struct cmd_hdl {
+ };
+-int read_macreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+-int write_macreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+-int read_bbreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+-int write_bbreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+-int read_rfreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+-int write_rfreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
+-
+-
+ int NULL_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+ int join_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+ int disconnect_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+@@ -612,8 +602,6 @@ int sitesurvey_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+ int setauth_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+ int setkey_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+ int set_stakey_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+-int set_assocsta_hdl(struct rtw_adapter *padapter, const u8 *pbuf);
+-int del_assocsta_hdl(struct rtw_adapter *padapter, const u8 *pbuf);
+ int add_ba_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+ int mlme_evt_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
+diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
+index ff11e13..22a13ac 100644
+--- a/drivers/staging/rtl8723au/include/usb_ops.h
++++ b/drivers/staging/rtl8723au/include/usb_ops.h
+@@ -36,9 +36,9 @@ enum {
+ void rtl8723au_set_hw_type(struct rtw_adapter *padapter);
+-void rtl8723au_recv_tasklet(void *priv);
++void rtl8723au_recv_tasklet(unsigned long priv);
+-void rtl8723au_xmit_tasklet(void *priv);
++void rtl8723au_xmit_tasklet(unsigned long priv);
+ /* Increase and check if the continual_urb_error of this @param dvobjprive is
+  * larger than MAX_CONTINUAL_URB_ERR. Return result
+@@ -48,7 +48,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+       int ret = false;
+       int value;
+-      value = atomic_inc_return(&dvobj->continual_urb_error);
++      value = atomic_inc_return_unchecked(&dvobj->continual_urb_error);
+       if (value > MAX_CONTINUAL_URB_ERR) {
+               DBG_8723A("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
+                         dvobj, value, MAX_CONTINUAL_URB_ERR);
+@@ -60,7 +60,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+ /* Set the continual_urb_error of this @param dvobjprive to 0 */
+ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
+ {
+-      atomic_set(&dvobj->continual_urb_error, 0);
++      atomic_set_unchecked(&dvobj->continual_urb_error, 0);
+ }
+ bool rtl8723au_chip_configure(struct rtw_adapter *padapter);
+diff --git a/drivers/staging/rtl8723au/include/xmit_osdep.h b/drivers/staging/rtl8723au/include/xmit_osdep.h
+index 2be04c48..a494e09 100644
+--- a/drivers/staging/rtl8723au/include/xmit_osdep.h
++++ b/drivers/staging/rtl8723au/include/xmit_osdep.h
+@@ -21,7 +21,7 @@
+ #define NR_XMITFRAME  256
+-int rtw_xmit23a_entry23a(struct sk_buff *pkt, struct net_device *pnetdev);
++netdev_tx_t rtw_xmit23a_entry23a(struct sk_buff *pkt, struct net_device *pnetdev);
+ void rtw_os_xmit_schedule23a(struct rtw_adapter *padapter);
+diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+index d0ba377..884c9d7 100644
+--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
++++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+@@ -2435,7 +2435,7 @@ static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
+       return 0;
+ }
+-static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb,
++static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb,
+                                             struct net_device *ndev)
+ {
+       int ret = 0;
+diff --git a/drivers/staging/rtl8723au/os_dep/xmit_linux.c b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
+index 64be72a..d0d2f81 100644
+--- a/drivers/staging/rtl8723au/os_dep/xmit_linux.c
++++ b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
+@@ -117,7 +117,7 @@ static void rtw_check_xmit_resource(struct rtw_adapter *padapter,
+       }
+ }
+-int rtw_xmit23a_entry23a(struct sk_buff *skb, struct net_device *pnetdev)
++netdev_tx_t rtw_xmit23a_entry23a(struct sk_buff *skb, struct net_device *pnetdev)
+ {
+       struct rtw_adapter *padapter = netdev_priv(pnetdev);
+       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
+index 6ed004e..f8ebf08 100644
+--- a/drivers/staging/sm750fb/sm750.c
++++ b/drivers/staging/sm750fb/sm750.c
+@@ -725,6 +725,7 @@ static struct fb_ops lynxfb_ops = {
+       .fb_set_par = lynxfb_ops_set_par,
+       .fb_setcolreg = lynxfb_ops_setcolreg,
+       .fb_blank = lynxfb_ops_blank,
++      .fb_pan_display = lynxfb_ops_pan_display,
+       .fb_fillrect = cfb_fillrect,
+       .fb_imageblit = cfb_imageblit,
+       .fb_copyarea = cfb_copyarea,
+@@ -770,7 +771,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
+       par->index = index;
+       output->channel = &crtc->channel;
+       sm750fb_set_drv(par);
+-      lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
+       /*
+        * set current cursor variable and proc pointer,
+@@ -787,16 +787,20 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
+       memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
+       if (!g_hwcursor) {
+-              lynxfb_ops.fb_cursor = NULL;
++              pax_open_kernel();
++              const_cast(lynxfb_ops.fb_cursor) = NULL;
++              pax_close_kernel();
+               hw_cursor_disable(&crtc->cursor);
+       }
+       /* set info->fbops, must be set before fb_find_mode */
+       if (!sm750_dev->accel_off) {
+               /* use 2d acceleration */
+-              lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
+-              lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
+-              lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
++              pax_open_kernel();
++              const_cast(lynxfb_ops.fb_fillrect) = lynxfb_ops_fillrect;
++              const_cast(lynxfb_ops.fb_copyarea) = lynxfb_ops_copyarea;
++              const_cast(lynxfb_ops.fb_imageblit) = lynxfb_ops_imageblit;
++              pax_close_kernel();
+       }
+       info->fbops = &lynxfb_ops;
+diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
+index 39edd20..d860d0c 100644
+--- a/drivers/staging/unisys/visorbus/visorbus_private.h
++++ b/drivers/staging/unisys/visorbus/visorbus_private.h
+@@ -34,7 +34,7 @@ struct visorchipset_busdev_notifiers {
+       void (*device_destroy)(struct visor_device *bus_info);
+       void (*device_pause)(struct visor_device *bus_info);
+       void (*device_resume)(struct visor_device *bus_info);
+-};
++} __no_const;
+ /*  These functions live inside visorchipset, and will be called to indicate
+  *  responses to specific events (by code outside of visorchipset).
+@@ -49,7 +49,7 @@ struct visorchipset_busdev_responders {
+       void (*device_destroy)(struct visor_device *p, int response);
+       void (*device_pause)(struct visor_device *p, int response);
+       void (*device_resume)(struct visor_device *p, int response);
+-};
++} __no_const;
+ /** Register functions (in the bus driver) to get called by visorchipset
+  *  whenever a bus or device appears for which this guest is to be the
+diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
+index a28388d..6ae2929 100644
+--- a/drivers/staging/unisys/visornic/visornic_main.c
++++ b/drivers/staging/unisys/visornic/visornic_main.c
+@@ -797,7 +797,7 @@ static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
+  *    can be called again.
+  *    Returns NETDEV_TX_OK.
+  */
+-static int
++static netdev_tx_t
+ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       struct visornic_devdata *devdata;
+diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
+index e4c3165..d0c9eeb 100644
+--- a/drivers/staging/vt6655/rxtx.c
++++ b/drivers/staging/vt6655/rxtx.c
+@@ -1243,7 +1243,7 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
+               mic_hdr->payload_len = cpu_to_be16(payload_len);
+               ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
+-              pn64 = atomic64_read(&tx_key->tx_pn);
++              pn64 = atomic64_read_unchecked(&tx_key->tx_pn);
+               mic_hdr->ccmp_pn[5] = pn64;
+               mic_hdr->ccmp_pn[4] = pn64 >> 8;
+               mic_hdr->ccmp_pn[3] = pn64 >> 16;
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index aa59e7f..094dd59 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -749,7 +749,7 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
+               mic_hdr->payload_len = cpu_to_be16(payload_len);
+               ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
+-              pn64 = atomic64_read(&tx_key->tx_pn);
++              pn64 = atomic64_read_unchecked(&tx_key->tx_pn);
+               mic_hdr->ccmp_pn[5] = pn64;
+               mic_hdr->ccmp_pn[4] = pn64 >> 8;
+               mic_hdr->ccmp_pn[3] = pn64 >> 16;
+diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
+index ddfea29..5305b38 100644
+--- a/drivers/staging/wilc1000/host_interface.h
++++ b/drivers/staging/wilc1000/host_interface.h
+@@ -1,6 +1,7 @@
+ #ifndef HOST_INT_H
+ #define HOST_INT_H
++#include <linux/netdevice.h>
+ #include "coreconfigurator.h"
+ #define IP_ALEN  4
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index 3221511..6b6f9eb 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -983,7 +983,7 @@ static void linux_wlan_tx_complete(void *priv, int status)
+       kfree(pv_data);
+ }
+-int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
++netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+       struct wilc_vif *vif;
+       struct tx_complete_data *tx_data = NULL;
+diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
+index 22cf4b7..2684b57 100644
+--- a/drivers/staging/wilc1000/wilc_spi.c
++++ b/drivers/staging/wilc1000/wilc_spi.c
+@@ -19,6 +19,7 @@
+ #include <linux/of_gpio.h>
+ #include <linux/string.h>
++#include <linux/netdevice.h>
+ #include "wilc_wlan_if.h"
+ #include "wilc_wlan.h"
+ #include "wilc_wfi_netdevice.h"
+diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
+index 30e5312..1493a73 100644
+--- a/drivers/staging/wilc1000/wilc_wlan.h
++++ b/drivers/staging/wilc1000/wilc_wlan.h
+@@ -295,7 +295,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc);
+ void wilc_enable_tcp_ack_filter(bool value);
+ int wilc_wlan_get_num_conn_ifcs(struct wilc *);
+-int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
++netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
+ int wilc_mac_open(struct net_device *ndev);
+ int wilc_mac_close(struct net_device *ndev);
+diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
+index 90cc8cd..b98abd7 100644
+--- a/drivers/staging/wlan-ng/p80211netdev.c
++++ b/drivers/staging/wlan-ng/p80211netdev.c
+@@ -317,7 +317,7 @@ static void p80211netdev_rx_bh(unsigned long arg)
+ * Returns:
+ *     zero on success, non-zero on failure.
+ ----------------------------------------------------------------*/
+-static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
++static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
+                                        netdevice_t *netdev)
+ {
+       int result = 0;
+diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
+index 58bb6ed..d77a7e3 100644
+--- a/drivers/target/sbp/sbp_target.c
++++ b/drivers/target/sbp/sbp_target.c
+@@ -56,7 +56,7 @@ static const u32 sbp_unit_directory_template[] = {
+ #define SESSION_MAINTENANCE_INTERVAL HZ
+-static atomic_t login_id = ATOMIC_INIT(0);
++static atomic_unchecked_t login_id = ATOMIC_INIT(0);
+ static void session_maintenance_work(struct work_struct *);
+ static int sbp_run_transaction(struct fw_card *, int, int, int, int,
+@@ -422,7 +422,7 @@ static void sbp_management_request_login(
+       login->login_lun = unpacked_lun;
+       login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
+       login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
+-      login->login_id = atomic_inc_return(&login_id);
++      login->login_id = atomic_inc_return_unchecked(&login_id);
+       login->tgt_agt = sbp_target_agent_register(login);
+       if (IS_ERR(login->tgt_agt)) {
+diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
+index 01f0015..aa56551 100644
+--- a/drivers/thermal/devfreq_cooling.c
++++ b/drivers/thermal/devfreq_cooling.c
+@@ -363,6 +363,15 @@ static struct thermal_cooling_device_ops devfreq_cooling_ops = {
+       .set_cur_state = devfreq_cooling_set_cur_state,
+ };
++static struct thermal_cooling_device_ops devfreq_cooling_power_ops = {
++      .get_max_state = devfreq_cooling_get_max_state,
++      .get_cur_state = devfreq_cooling_get_cur_state,
++      .set_cur_state = devfreq_cooling_set_cur_state,
++      .get_requested_power = devfreq_cooling_get_requested_power,
++      .state2power = devfreq_cooling_state2power,
++      .power2state = devfreq_cooling_power2state,
++};
++
+ /**
+  * devfreq_cooling_gen_tables() - Generate power and freq tables.
+  * @dfc: Pointer to devfreq cooling device.
+@@ -482,15 +491,9 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+       dfc->devfreq = df;
+-      if (dfc_power) {
++      if (dfc_power)
+               dfc->power_ops = dfc_power;
+-              devfreq_cooling_ops.get_requested_power =
+-                      devfreq_cooling_get_requested_power;
+-              devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
+-              devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
+-      }
+-
+       err = devfreq_cooling_gen_tables(dfc);
+       if (err)
+               goto free_dfc;
+@@ -502,7 +505,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
+       snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
+       cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
+-                                                &devfreq_cooling_ops);
++                                                dfc_power ? &devfreq_cooling_power_ops : &devfreq_cooling_ops);
+       if (IS_ERR(cdev)) {
+               err = PTR_ERR(cdev);
+               dev_err(df->dev.parent,
+diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
+index 5836e55..708bbd6 100644
+--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
+@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
+       platform_set_drvdata(pdev, priv);
+       if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
+-              int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+-              int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
++              pax_open_kernel();
++              const_cast(int3400_thermal_ops.get_mode) = int3400_thermal_get_mode;
++              const_cast(int3400_thermal_ops.set_mode) = int3400_thermal_set_mode;
++              pax_close_kernel();
+       }
+       priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
+                                               priv, &int3400_thermal_ops,
+diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
+index b8e509c..f12be01 100644
+--- a/drivers/thermal/of-thermal.c
++++ b/drivers/thermal/of-thermal.c
+@@ -31,6 +31,7 @@
+ #include <linux/export.h>
+ #include <linux/string.h>
+ #include <linux/thermal.h>
++#include <linux/mm.h>
+ #include "thermal_core.h"
+@@ -425,9 +426,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
+       tz->ops = ops;
+       tz->sensor_data = data;
+-      tzd->ops->get_temp = of_thermal_get_temp;
+-      tzd->ops->get_trend = of_thermal_get_trend;
+-      tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
++      pax_open_kernel();
++      const_cast(tzd->ops->get_temp) = of_thermal_get_temp;
++      const_cast(tzd->ops->get_trend) = of_thermal_get_trend;
++      const_cast(tzd->ops->set_emul_temp) = of_thermal_set_emul_temp;
++      pax_close_kernel();
+       mutex_unlock(&tzd->lock);
+       return tzd;
+@@ -553,9 +556,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
+               return;
+       mutex_lock(&tzd->lock);
+-      tzd->ops->get_temp = NULL;
+-      tzd->ops->get_trend = NULL;
+-      tzd->ops->set_emul_temp = NULL;
++      pax_open_kernel();
++      const_cast(tzd->ops->get_temp) = NULL;
++      const_cast(tzd->ops->get_trend) = NULL;
++      const_cast(tzd->ops->set_emul_temp) = NULL;
++      pax_close_kernel();
+       tz->ops = NULL;
+       tz->sensor_data = NULL;
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 97f0a2b..5fa3381 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
+@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
++static struct notifier_block pkg_temp_thermal_notifier = {
+       .notifier_call = pkg_temp_thermal_cpu_callback,
+ };
+diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
+index 5e4fa92..39fe3d2 100644
+--- a/drivers/tty/cyclades.c
++++ b/drivers/tty/cyclades.c
+@@ -1568,10 +1568,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
+       printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
+                       info->port.count);
+ #endif
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+ #ifdef CY_DEBUG_COUNT
+       printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
+-              current->pid, info->port.count);
++              current->pid, atomic_read(&info->port.count));
+ #endif
+       /*
+@@ -3947,7 +3947,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
+               for (j = 0; j < cy_card[i].nports; j++) {
+                       info = &cy_card[i].ports[j];
+-                      if (info->port.count) {
++                      if (atomic_read(&info->port.count)) {
+                               /* XXX is the ldisc num worth this? */
+                               struct tty_struct *tty;
+                               struct tty_ldisc *ld;
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index ce86487..8ff3311 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -343,7 +343,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+       spin_lock_irqsave(&hp->port.lock, flags);
+       /* Check and then increment for fast path open. */
+-      if (hp->port.count++ > 0) {
++      if (atomic_inc_return(&hp->port.count) > 1) {
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+               hvc_kick();
+               return 0;
+@@ -398,7 +398,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+       spin_lock_irqsave(&hp->port.lock, flags);
+-      if (--hp->port.count == 0) {
++      if (atomic_dec_return(&hp->port.count) == 0) {
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+               /* We are done with the tty pointer now. */
+               tty_port_tty_set(&hp->port, NULL);
+@@ -420,9 +420,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+                */
+               tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
+       } else {
+-              if (hp->port.count < 0)
++              if (atomic_read(&hp->port.count) < 0)
+                       printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
+-                              hp->vtermno, hp->port.count);
++                              hp->vtermno, atomic_read(&hp->port.count));
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+       }
+ }
+@@ -452,12 +452,12 @@ static void hvc_hangup(struct tty_struct *tty)
+        * open->hangup case this can be called after the final close so prevent
+        * that from happening for now.
+        */
+-      if (hp->port.count <= 0) {
++      if (atomic_read(&hp->port.count) <= 0) {
+               spin_unlock_irqrestore(&hp->port.lock, flags);
+               return;
+       }
+-      hp->port.count = 0;
++      atomic_set(&hp->port.count, 0);
+       spin_unlock_irqrestore(&hp->port.lock, flags);
+       tty_port_tty_set(&hp->port, NULL);
+@@ -505,7 +505,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
+               return -EPIPE;
+       /* FIXME what's this (unprotected) check for? */
+-      if (hp->port.count <= 0)
++      if (atomic_read(&hp->port.count) <= 0)
+               return -EIO;
+       spin_lock_irqsave(&hp->lock, flags);
+diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
+index 3c4d7c2..3410b86 100644
+--- a/drivers/tty/hvc/hvcs.c
++++ b/drivers/tty/hvc/hvcs.c
+@@ -83,6 +83,7 @@
+ #include <asm/hvcserver.h>
+ #include <asm/uaccess.h>
+ #include <asm/vio.h>
++#include <asm/local.h>
+ /*
+  * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
+@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
+       spin_lock_irqsave(&hvcsd->lock, flags);
+-      if (hvcsd->port.count > 0) {
++      if (atomic_read(&hvcsd->port.count) > 0) {
+               spin_unlock_irqrestore(&hvcsd->lock, flags);
+               printk(KERN_INFO "HVCS: vterm state unchanged.  "
+                               "The hvcs device node is still in use.\n");
+@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
+               }
+       }
+-      hvcsd->port.count = 0;
++      atomic_set(&hvcsd->port.count, 0);
+       hvcsd->port.tty = tty;
+       tty->driver_data = hvcsd;
+@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
+       unsigned long flags;
+       spin_lock_irqsave(&hvcsd->lock, flags);
+-      hvcsd->port.count++;
++      atomic_inc(&hvcsd->port.count);
+       hvcsd->todo_mask |= HVCS_SCHED_READ;
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+       hvcsd = tty->driver_data;
+       spin_lock_irqsave(&hvcsd->lock, flags);
+-      if (--hvcsd->port.count == 0) {
++      if (atomic_dec_and_test(&hvcsd->port.count)) {
+               vio_disable_interrupts(hvcsd->vdev);
+@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+               free_irq(irq, hvcsd);
+               return;
+-      } else if (hvcsd->port.count < 0) {
++      } else if (atomic_read(&hvcsd->port.count) < 0) {
+               printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+                               " is missmanaged.\n",
+-              hvcsd->vdev->unit_address, hvcsd->port.count);
++              hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
+       }
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       /* Preserve this so that we know how many kref refs to put */
+-      temp_open_count = hvcsd->port.count;
++      temp_open_count = atomic_read(&hvcsd->port.count);
+       /*
+        * Don't kref put inside the spinlock because the destruction
+@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+       tty->driver_data = NULL;
+       hvcsd->port.tty = NULL;
+-      hvcsd->port.count = 0;
++      atomic_set(&hvcsd->port.count, 0);
+       /* This will drop any buffered data on the floor which is OK in a hangup
+        * scenario. */
+@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
+        * the middle of a write operation?  This is a crummy place to do this
+        * but we want to keep it all in the spinlock.
+        */
+-      if (hvcsd->port.count <= 0) {
++      if (atomic_read(&hvcsd->port.count) <= 0) {
+               spin_unlock_irqrestore(&hvcsd->lock, flags);
+               return -ENODEV;
+       }
+@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
+ {
+       struct hvcs_struct *hvcsd = tty->driver_data;
+-      if (!hvcsd || hvcsd->port.count <= 0)
++      if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
+               return 0;
+       return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
+index 96ce6bd..208f20a 100644
+--- a/drivers/tty/hvc/hvsi.c
++++ b/drivers/tty/hvc/hvsi.c
+@@ -85,7 +85,7 @@ struct hvsi_struct {
+       int n_outbuf;
+       uint32_t vtermno;
+       uint32_t virq;
+-      atomic_t seqno; /* HVSI packet sequence number */
++      atomic_unchecked_t seqno; /* HVSI packet sequence number */
+       uint16_t mctrl;
+       uint8_t state;  /* HVSI protocol state */
+       uint8_t flags;
+@@ -297,7 +297,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
+       packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
+       packet.hdr.len = sizeof(struct hvsi_query_response);
+-      packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
++      packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno));
+       packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
+       packet.u.version = HVSI_VERSION;
+       packet.query_seqno = cpu_to_be16(query_seqno+1);
+@@ -557,7 +557,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
+       packet.hdr.type = VS_QUERY_PACKET_HEADER;
+       packet.hdr.len = sizeof(struct hvsi_query);
+-      packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
++      packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno));
+       packet.verb = cpu_to_be16(verb);
+       pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+@@ -599,7 +599,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
+       int wrote;
+       packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+-      packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
++      packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno));
+       packet.hdr.len = sizeof(struct hvsi_control);
+       packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+       packet.mask = cpu_to_be32(HVSI_TSDTR);
+@@ -682,7 +682,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
+       BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
+       packet.hdr.type = VS_DATA_PACKET_HEADER;
+-      packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
++      packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno));
+       packet.hdr.len = count + sizeof(struct hvsi_header);
+       memcpy(&packet.data, buf, count);
+@@ -699,7 +699,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
+       struct hvsi_control packet __ALIGNED__;
+       packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+-      packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
++      packet.hdr.seqno = cpu_to_be16(atomic_inc_return_unchecked(&hp->seqno));
+       packet.hdr.len = 6;
+       packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
+@@ -727,7 +727,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
+       tty_port_tty_set(&hp->port, tty);
+       spin_lock_irqsave(&hp->lock, flags);
+-      hp->port.count++;
++      atomic_inc(&hp->port.count);
+       atomic_set(&hp->seqno, 0);
+       h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
+       spin_unlock_irqrestore(&hp->lock, flags);
+@@ -784,7 +784,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
+       spin_lock_irqsave(&hp->lock, flags);
+-      if (--hp->port.count == 0) {
++      if (atomic_dec_return(&hp->port.count) == 0) {
+               tty_port_tty_set(&hp->port, NULL);
+               hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
+@@ -817,9 +817,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
+                       spin_lock_irqsave(&hp->lock, flags);
+               }
+-      } else if (hp->port.count < 0)
++      } else if (atomic_read(&hp->port.count) < 0)
+               printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
+-                     hp - hvsi_ports, hp->port.count);
++                     hp - hvsi_ports, atomic_read(&hp->port.count));
+       spin_unlock_irqrestore(&hp->lock, flags);
+ }
+@@ -834,7 +834,7 @@ static void hvsi_hangup(struct tty_struct *tty)
+       tty_port_tty_set(&hp->port, NULL);
+       spin_lock_irqsave(&hp->lock, flags);
+-      hp->port.count = 0;
++      atomic_set(&hp->port.count, 0);
+       hp->n_outbuf = 0;
+       spin_unlock_irqrestore(&hp->lock, flags);
+ }
+diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
+index a270f04..7c77b5d 100644
+--- a/drivers/tty/hvc/hvsi_lib.c
++++ b/drivers/tty/hvc/hvsi_lib.c
+@@ -8,7 +8,7 @@
+ static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
+ {
+-      packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
++      packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
+       /* Assumes that always succeeds, works in practice */
+       return pv->put_chars(pv->termno, (char *)packet, packet->len);
+@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
+       /* Reset state */
+       pv->established = 0;
+-      atomic_set(&pv->seqno, 0);
++      atomic_set_unchecked(&pv->seqno, 0);
+       pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
+diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
+index 2685d59..a63936a 100644
+--- a/drivers/tty/ipwireless/tty.c
++++ b/drivers/tty/ipwireless/tty.c
+@@ -28,6 +28,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_flip.h>
+ #include <linux/uaccess.h>
++#include <asm/local.h>
+ #include "tty.h"
+ #include "network.h"
+@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+               return -ENODEV;
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (tty->port.count == 0)
++      if (atomic_read(&tty->port.count) == 0)
+               tty->tx_bytes_queued = 0;
+-      tty->port.count++;
++      atomic_inc(&tty->port.count);
+       tty->port.tty = linux_tty;
+       linux_tty->driver_data = tty;
+@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+ static void do_ipw_close(struct ipw_tty *tty)
+ {
+-      tty->port.count--;
+-
+-      if (tty->port.count == 0) {
++      if (atomic_dec_return(&tty->port.count) == 0) {
+               struct tty_struct *linux_tty = tty->port.tty;
+               if (linux_tty != NULL) {
+@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
+               return;
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (tty->port.count == 0) {
++      if (atomic_read(&tty->port.count) == 0) {
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return;
+       }
+@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (!tty->port.count) {
++      if (!atomic_read(&tty->port.count)) {
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return;
+       }
+@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
+               return -ENODEV;
+       mutex_lock(&tty->ipw_tty_mutex);
+-      if (!tty->port.count) {
++      if (!atomic_read(&tty->port.count)) {
+               mutex_unlock(&tty->ipw_tty_mutex);
+               return -EINVAL;
+       }
+@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+@@ -270,7 +269,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
+       if (!tty)
+               return 0;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return 0;
+       return tty->tx_bytes_queued;
+@@ -351,7 +350,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       return get_control_lines(tty);
+@@ -367,7 +366,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       return set_control_lines(tty, set, clear);
+@@ -381,7 +380,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
+       if (!tty)
+               return -ENODEV;
+-      if (!tty->port.count)
++      if (!atomic_read(&tty->port.count))
+               return -EINVAL;
+       /* FIXME: Exactly how is the tty object locked here .. */
+@@ -537,7 +536,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
+                                * are gone */
+                               mutex_lock(&ttyj->ipw_tty_mutex);
+                       }
+-                      while (ttyj->port.count)
++                      while (atomic_read(&ttyj->port.count))
+                               do_ipw_close(ttyj);
+                       ipwireless_disassociate_network_ttys(network,
+                                                            ttyj->channel_idx);
+diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
+index 60d37b2..3d222ca 100644
+--- a/drivers/tty/moxa.c
++++ b/drivers/tty/moxa.c
+@@ -1188,7 +1188,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
+       }
+       ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
+-      ch->port.count++;
++      atomic_inc(&ch->port.count);
+       tty->driver_data = ch;
+       tty_port_tty_set(&ch->port, tty);
+       mutex_lock(&ch->port.mutex);
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 54cab59..3c05ac4 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+       spin_lock_init(&dlci->lock);
+       mutex_init(&dlci->mutex);
+       dlci->fifo = &dlci->_fifo;
+-      if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
++      if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
+               kfree(dlci);
+               return NULL;
+       }
+@@ -2652,7 +2652,7 @@ static inline void muxnet_put(struct gsm_mux_net *mux_net)
+       kref_put(&mux_net->ref, net_free);
+ }
+-static int gsm_mux_net_start_xmit(struct sk_buff *skb,
++static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
+                                     struct net_device *net)
+ {
+       struct gsm_mux_net *mux_net = netdev_priv(net);
+@@ -2943,7 +2943,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+       struct gsm_dlci *dlci = tty->driver_data;
+       struct tty_port *port = &dlci->port;
+-      port->count++;
++      atomic_inc(&port->count);
+       tty_port_tty_set(port, tty);
+       dlci->modem_rx = 0;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index bdf0e6e..ea92f7e 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1478,7 +1478,7 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
+ static void
+ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
+-                         char *fp, int count)
++                         char *fp, size_t count)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+       size_t n, head;
+@@ -1498,7 +1498,7 @@ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
+ static void
+ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
+-                    char *fp, int count)
++                    char *fp, size_t count)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+       char flag = TTY_NORMAL;
+@@ -1515,7 +1515,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
+ static void
+ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
+-                        char *fp, int count)
++                        char *fp, size_t count)
+ {
+       char flag = TTY_NORMAL;
+@@ -1529,7 +1529,7 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
+ static void
+ n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp,
+-                        char *fp, int count)
++                        char *fp, size_t count)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+       char flag = TTY_NORMAL;
+@@ -1563,7 +1563,7 @@ n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp,
+ static void
+ n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp,
+-                     char *fp, int count)
++                     char *fp, size_t count)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+       char flag = TTY_NORMAL;
+@@ -1588,7 +1588,7 @@ n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp,
+ }
+ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+-                        char *fp, int count)
++                        char *fp, size_t count)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+       bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
+@@ -1666,10 +1666,10 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+  */
+ static int
+ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+-                       char *fp, int count, int flow)
++                       char *fp, size_t count, int flow)
+ {
+       struct n_tty_data *ldata = tty->disc_data;
+-      int room, n, rcvd = 0, overflow;
++      size_t room, n, rcvd = 0, overflow;
+       down_read(&tty->termios_rwsem);
+@@ -1692,15 +1692,16 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+               room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
+               if (I_PARMRK(tty))
+                       room = (room + 2) / 3;
+-              room--;
+-              if (room <= 0) {
++              if (room <= 1) {
+                       overflow = ldata->icanon && ldata->canon_head == tail;
+-                      if (overflow && room < 0)
++                      if (overflow && room == 0)
+                               ldata->read_head--;
+                       room = overflow;
+                       ldata->no_room = flow && !room;
+-              } else
++              } else {
++                      room--;
+                       overflow = 0;
++              }
+               n = min(count, room);
+               if (!n)
+@@ -2465,7 +2466,8 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+ {
+       *ops = n_tty_ops;
+       ops->owner = NULL;
+-      ops->refcount = ops->flags = 0;
++      atomic_set(&ops->refcount, 0);
++      ops->flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 51e0d32..d1ac13c 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -856,8 +856,10 @@ static void __init unix98_pty_init(void)
+               panic("Couldn't register Unix98 pts driver");
+       /* Now create the /dev/ptmx special device */
++      pax_open_kernel();
+       tty_default_fops(&ptmx_fops);
+-      ptmx_fops.open = ptmx_open;
++      const_cast(ptmx_fops.open) = ptmx_open;
++      pax_close_kernel();
+       cdev_init(&ptmx_cdev, &ptmx_fops);
+       if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
+index b0cc47c..58ea7a9 100644
+--- a/drivers/tty/rocket.c
++++ b/drivers/tty/rocket.c
+@@ -906,7 +906,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
+       tty->driver_data = info;
+       tty_port_tty_set(port, tty);
+-      if (port->count++ == 0) {
++      if (atomic_inc_return(&port->count) == 1) {
+               atomic_inc(&rp_num_ports_open);
+ #ifdef ROCKET_DEBUG_OPEN
+@@ -915,7 +915,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
+ #endif
+       }
+ #ifdef ROCKET_DEBUG_OPEN
+-      printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
+-      printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
+ #endif
+       /*
+@@ -1500,7 +1500,7 @@ static void rp_hangup(struct tty_struct *tty)
+ #endif
+       rp_flush_buffer(tty);
+       spin_lock_irqsave(&info->port.lock, flags);
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               atomic_dec(&rp_num_ports_open);
+       clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+       spin_unlock_irqrestore(&info->port.lock, flags);
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index dcf43f6..594793a 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -488,9 +488,9 @@ static void univ8250_release_port(struct uart_port *port)
+ static void univ8250_rsa_support(struct uart_ops *ops)
+ {
+-      ops->config_port  = univ8250_config_port;
+-      ops->request_port = univ8250_request_port;
+-      ops->release_port = univ8250_release_port;
++      const_cast(ops->config_port)  = univ8250_config_port;
++      const_cast(ops->request_port) = univ8250_request_port;
++      const_cast(ops->release_port) = univ8250_release_port;
+ }
+ #else
+@@ -533,8 +533,10 @@ static void __init serial8250_isa_init_ports(void)
+       }
+       /* chain base port ops to support Remote Supervisor Adapter */
+-      univ8250_port_ops = *base_ops;
++      pax_open_kernel();
++      memcpy((void *)&univ8250_port_ops, base_ops, sizeof univ8250_port_ops);
+       univ8250_rsa_support(&univ8250_port_ops);
++      pax_close_kernel();
+       if (share_irqs)
+               irqflag = IRQF_SHARED;
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index bc51b32..f947b5b 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5795,7 +5795,7 @@ static struct pci_device_id serial_pci_tbl[] = {
+ };
+ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
+-                                              pci_channel_state_t state)
++                                              enum pci_channel_state state)
+ {
+       struct serial_private *priv = pci_get_drvdata(dev);
+diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
+index e5c42fe..f091b02 100644
+--- a/drivers/tty/serial/ioc4_serial.c
++++ b/drivers/tty/serial/ioc4_serial.c
+@@ -437,7 +437,7 @@ struct ioc4_soft {
+               } is_intr_info[MAX_IOC4_INTR_ENTS];
+               /* Number of entries active in the above array */
+-              atomic_t is_num_intrs;
++              atomic_unchecked_t is_num_intrs;
+       } is_intr_type[IOC4_NUM_INTR_TYPES];
+       /* is_ir_lock must be held while
+@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
+       BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
+              || (type == IOC4_OTHER_INTR_TYPE)));
+-      i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
++      i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
+       BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
+       /* Save off the lower level interrupt handler */
+@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
+       soft = arg;
+       for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
+-              num_intrs = (int)atomic_read(
++              num_intrs = (int)atomic_read_unchecked(
+                               &soft->is_intr_type[intr_type].is_num_intrs);
+               this_mir = this_ir = pending_intrs(soft, intr_type);
+diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
+index a119f11..120444e 100644
+--- a/drivers/tty/serial/jsm/jsm_driver.c
++++ b/drivers/tty/serial/jsm/jsm_driver.c
+@@ -336,7 +336,7 @@ static struct pci_driver jsm_driver = {
+ };
+ static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev,
+-                                      pci_channel_state_t state)
++                                      enum pci_channel_state state)
+ {
+       struct jsm_board *brd = pci_get_drvdata(pdev);
+diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
+index 117df15..8f7486f 100644
+--- a/drivers/tty/serial/kgdb_nmi.c
++++ b/drivers/tty/serial/kgdb_nmi.c
+@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
+        * I/O utilities that messages sent to the console will automatically
+        * be displayed on the dbg_io.
+        */
+-      dbg_io_ops->is_console = true;
++      pax_open_kernel();
++      const_cast(dbg_io_ops->is_console) = true;
++      pax_close_kernel();
+       return 0;
+ }
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index a260cde..604fce9 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -24,8 +24,9 @@
+ #define MAX_CONFIG_LEN                40
+ static struct kgdb_io         kgdboc_io_ops;
++static struct kgdb_io         kgdboc_io_ops_console;
+-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
+ static int configured         = -1;
+ static char config[MAX_CONFIG_LEN];
+@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
+       kgdboc_unregister_kbd();
+       if (configured == 1)
+               kgdb_unregister_io_module(&kgdboc_io_ops);
++      else if (configured == 2)
++              kgdb_unregister_io_module(&kgdboc_io_ops_console);
+ }
+ static int configure_kgdboc(void)
+@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
+       int err;
+       char *cptr = config;
+       struct console *cons;
++      int is_console = 0;
+       err = kgdboc_option_setup(config);
+       if (err || !strlen(config) || isspace(config[0]))
+               goto noconfig;
+       err = -ENODEV;
+-      kgdboc_io_ops.is_console = 0;
+       kgdb_tty_driver = NULL;
+       kgdboc_use_kms = 0;
+@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
+               int idx;
+               if (cons->device && cons->device(cons, &idx) == p &&
+                   idx == tty_line) {
+-                      kgdboc_io_ops.is_console = 1;
++                      is_console = 1;
+                       break;
+               }
+               cons = cons->next;
+@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
+       kgdb_tty_line = tty_line;
+ do_register:
+-      err = kgdb_register_io_module(&kgdboc_io_ops);
++      if (is_console) {
++              err = kgdb_register_io_module(&kgdboc_io_ops_console);
++              configured = 2;
++      } else {
++              err = kgdb_register_io_module(&kgdboc_io_ops);
++              configured = 1;
++      }
+       if (err)
+               goto noconfig;
+@@ -205,8 +214,6 @@ do_register:
+       if (err)
+               goto nmi_con_failed;
+-      configured = 1;
+-
+       return 0;
+ nmi_con_failed:
+@@ -223,7 +230,7 @@ noconfig:
+ static int __init init_kgdboc(void)
+ {
+       /* Already configured? */
+-      if (configured == 1)
++      if (configured >= 1)
+               return 0;
+       return configure_kgdboc();
+@@ -245,7 +252,7 @@ static void kgdboc_put_char(u8 chr)
+                                       kgdb_tty_line, chr);
+ }
+-static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
++static int param_set_kgdboc_var(const char *kmessage, const struct kernel_param *kp)
+ {
+       int len = strlen(kmessage);
+@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+       if (config[len - 1] == '\n')
+               config[len - 1] = '\0';
+-      if (configured == 1)
++      if (configured >= 1)
+               cleanup_kgdboc();
+       /* Go and configure with the new params. */
+@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
+       .post_exception         = kgdboc_post_exp_handler,
+ };
++static struct kgdb_io kgdboc_io_ops_console = {
++      .name                   = "kgdboc",
++      .read_char              = kgdboc_get_char,
++      .write_char             = kgdboc_put_char,
++      .pre_exception          = kgdboc_pre_exp_handler,
++      .post_exception         = kgdboc_post_exp_handler,
++      .is_console             = 1
++};
++
+ #ifdef CONFIG_KGDB_SERIAL_CONSOLE
+ /* This is only available if kgdboc is a built in for early debugging */
+ static int __init kgdboc_early_init(char *opt)
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index 7312e7e..0a0f8b6 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -1726,7 +1726,7 @@ static struct uart_driver msm_uart_driver = {
+       .cons = MSM_CONSOLE,
+ };
+-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
++static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
+ static const struct of_device_id msm_uartdm_table[] = {
+       { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
+@@ -1750,7 +1750,7 @@ static int msm_serial_probe(struct platform_device *pdev)
+               line = pdev->id;
+       if (line < 0)
+-              line = atomic_inc_return(&msm_uart_next_id) - 1;
++              line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
+       if (unlikely(line < 0 || line >= UART_NR))
+               return -ENXIO;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index ae2095a..a3cec83 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -976,11 +976,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
+       ourport->tx_in_progress = 0;
+ }
++static int s3c64xx_serial_startup(struct uart_port *port);
+ static int s3c24xx_serial_startup(struct uart_port *port)
+ {
+       struct s3c24xx_uart_port *ourport = to_ourport(port);
+       int ret;
++      /* Startup sequence is different for s3c64xx and higher SoC's */
++      if (s3c24xx_serial_has_interrupt_mask(port))
++              return s3c64xx_serial_startup(port);
++
+       dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
+           port, (unsigned long long)port->mapbase, port->membase);
+@@ -1687,10 +1692,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+       /* setup info for port */
+       port->dev       = &platdev->dev;
+-      /* Startup sequence is different for s3c64xx and higher SoC's */
+-      if (s3c24xx_serial_has_interrupt_mask(port))
+-              s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
+-
+       port->uartclk = 1;
+       if (cfg->uart_flags & UPF_CONS_FLOW) {
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 9fc1533..01c5972 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1473,7 +1473,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
+               state = drv->state + tty->index;
+               port = &state->port;
+               spin_lock_irq(&port->lock);
+-              --port->count;
++              atomic_dec(&port->count);
+               spin_unlock_irq(&port->lock);
+               return;
+       }
+@@ -1614,7 +1614,7 @@ static void uart_hangup(struct tty_struct *tty)
+               uart_flush_buffer(tty);
+               uart_shutdown(tty, state);
+               spin_lock_irqsave(&port->lock, flags);
+-              port->count = 0;
++              atomic_set(&port->count, 0);
+               spin_unlock_irqrestore(&port->lock, flags);
+               tty_port_set_active(port, 0);
+               tty_port_tty_set(port, NULL);
+@@ -1717,7 +1717,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
+       pr_debug("uart_open(%d) called\n", line);
+       spin_lock_irq(&port->lock);
+-      ++port->count;
++      atomic_inc(&port->count);
+       spin_unlock_irq(&port->lock);
+       /*
+diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
+index c13e27e..335a512 100644
+--- a/drivers/tty/synclink.c
++++ b/drivers/tty/synclink.c
+@@ -3075,7 +3075,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
+       
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
+-                       __FILE__,__LINE__, info->device_name, info->port.count);
++                       __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
+       if (tty_port_close_start(&info->port, tty, filp) == 0)
+               goto cleanup;
+@@ -3093,7 +3093,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
+ cleanup:                      
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+-                      tty->driver->name, info->port.count);
++                      tty->driver->name, atomic_read(&info->port.count));
+                       
+ }     /* end of mgsl_close() */
+@@ -3192,8 +3192,8 @@ static void mgsl_hangup(struct tty_struct *tty)
+       mgsl_flush_buffer(tty);
+       shutdown(info);
+-      
+-      info->port.count = 0;   
++
++      atomic_set(&info->port.count, 0);
+       tty_port_set_active(&info->port, 0);
+       info->port.tty = NULL;
+@@ -3281,10 +3281,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+       
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):block_til_ready before block on %s count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+       spin_lock_irqsave(&info->irq_spinlock, flags);
+-      port->count--;
++      atomic_dec(&port->count);
+       spin_unlock_irqrestore(&info->irq_spinlock, flags);
+       port->blocked_open++;
+@@ -3311,7 +3311,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+               
+               if (debug_level >= DEBUG_LEVEL_INFO)
+                       printk("%s(%d):block_til_ready blocking on %s count=%d\n",
+-                               __FILE__,__LINE__, tty->driver->name, port->count );
++                               __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+                                
+               tty_unlock(tty);
+               schedule();
+@@ -3323,12 +3323,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+       
+       /* FIXME: Racy on hangup during close wait */
+       if (!tty_hung_up_p(filp))
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+                        
+       if (!retval)
+               tty_port_set_active(port, 1);
+@@ -3380,7 +3380,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
+               
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
+-                       __FILE__,__LINE__,tty->driver->name, info->port.count);
++                       __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
+       info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -3390,10 +3390,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
+               spin_unlock_irqrestore(&info->netlock, flags);
+               goto cleanup;
+       }
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (info->port.count == 1) {
++      if (atomic_read(&info->port.count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info);
+               if (retval < 0)
+@@ -3417,8 +3417,8 @@ cleanup:
+       if (retval) {
+               if (tty->count == 1)
+                       info->port.tty = NULL; /* tty layer will release tty struct */
+-              if(info->port.count)
+-                      info->port.count--;
++              if (atomic_read(&info->port.count))
++                      atomic_dec(&info->port.count);
+       }
+       
+       return retval;
+@@ -7637,7 +7637,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       switch (encoding)
+@@ -7733,7 +7733,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -7819,7 +7819,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
+index 7aca2d4..45a7121 100644
+--- a/drivers/tty/synclink_gt.c
++++ b/drivers/tty/synclink_gt.c
+@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
+       tty->driver_data = info;
+       info->port.tty = tty;
+-      DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
++      DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
+       mutex_lock(&info->port.mutex);
+       info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -682,10 +682,10 @@ static int open(struct tty_struct *tty, struct file *filp)
+               mutex_unlock(&info->port.mutex);
+               goto cleanup;
+       }
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (info->port.count == 1) {
++      if (atomic_read(&info->port.count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info);
+               if (retval < 0) {
+@@ -706,8 +706,8 @@ cleanup:
+       if (retval) {
+               if (tty->count == 1)
+                       info->port.tty = NULL; /* tty layer will release tty struct */
+-              if(info->port.count)
+-                      info->port.count--;
++              if(atomic_read(&info->port.count))
++                      atomic_dec(&info->port.count);
+       }
+       DBGINFO(("%s open rc=%d\n", info->device_name, retval));
+@@ -720,7 +720,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+       if (sanity_check(info, tty->name, "close"))
+               return;
+-      DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
++      DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
+       if (tty_port_close_start(&info->port, tty, filp) == 0)
+               goto cleanup;
+@@ -737,7 +737,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+       tty_port_close_end(&info->port, tty);
+       info->port.tty = NULL;
+ cleanup:
+-      DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
++      DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
+ }
+ static void hangup(struct tty_struct *tty)
+@@ -755,7 +755,7 @@ static void hangup(struct tty_struct *tty)
+       shutdown(info);
+       spin_lock_irqsave(&info->port.lock, flags);
+-      info->port.count = 0;
++      atomic_set(&info->port.count, 0);
+       info->port.tty = NULL;
+       spin_unlock_irqrestore(&info->port.lock, flags);
+       tty_port_set_active(&info->port, 0);
+@@ -1435,7 +1435,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       DBGINFO(("%s hdlcdev_attach\n", info->device_name));
+@@ -1531,7 +1531,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               DBGINFO(("%s hdlc_open busy\n", dev->name));
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -1616,7 +1616,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+       DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+@@ -2403,7 +2403,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
+               if (port == NULL)
+                       continue;
+               spin_lock(&port->lock);
+-              if ((port->port.count || port->netcount) &&
++              if ((atomic_read(&port->port.count) || port->netcount) &&
+                   port->pending_bh && !port->bh_running &&
+                   !port->bh_requested) {
+                       DBGISR(("%s bh queued\n", port->device_name));
+@@ -3282,7 +3282,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       add_wait_queue(&port->open_wait, &wait);
+       spin_lock_irqsave(&info->lock, flags);
+-      port->count--;
++      atomic_dec(&port->count);
+       spin_unlock_irqrestore(&info->lock, flags);
+       port->blocked_open++;
+@@ -3317,7 +3317,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       remove_wait_queue(&port->open_wait, &wait);
+       if (!tty_hung_up_p(filp))
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       if (!retval)
+diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
+index dec1565..bbf9fcc 100644
+--- a/drivers/tty/synclinkmp.c
++++ b/drivers/tty/synclinkmp.c
+@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s open(), old ref count = %d\n",
+-                       __FILE__,__LINE__,tty->driver->name, info->port.count);
++                       __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
+       info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -760,10 +760,10 @@ static int open(struct tty_struct *tty, struct file *filp)
+               spin_unlock_irqrestore(&info->netlock, flags);
+               goto cleanup;
+       }
+-      info->port.count++;
++      atomic_inc(&info->port.count);
+       spin_unlock_irqrestore(&info->netlock, flags);
+-      if (info->port.count == 1) {
++      if (atomic_read(&info->port.count) == 1) {
+               /* 1st open on this device, init hardware */
+               retval = startup(info);
+               if (retval < 0)
+@@ -787,8 +787,8 @@ cleanup:
+       if (retval) {
+               if (tty->count == 1)
+                       info->port.tty = NULL; /* tty layer will release tty struct */
+-              if(info->port.count)
+-                      info->port.count--;
++              if(atomic_read(&info->port.count))
++                      atomic_dec(&info->port.count);
+       }
+       return retval;
+@@ -806,7 +806,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s close() entry, count=%d\n",
+-                       __FILE__,__LINE__, info->device_name, info->port.count);
++                       __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
+       if (tty_port_close_start(&info->port, tty, filp) == 0)
+               goto cleanup;
+@@ -825,7 +825,7 @@ static void close(struct tty_struct *tty, struct file *filp)
+ cleanup:
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
+-                      tty->driver->name, info->port.count);
++                      tty->driver->name, atomic_read(&info->port.count));
+ }
+ /* Called by tty_hangup() when a hangup is signaled.
+@@ -848,7 +848,7 @@ static void hangup(struct tty_struct *tty)
+       shutdown(info);
+       spin_lock_irqsave(&info->port.lock, flags);
+-      info->port.count = 0;
++      atomic_set(&info->port.count, 0);
+       info->port.tty = NULL;
+       spin_unlock_irqrestore(&info->port.lock, flags);
+       tty_port_set_active(&info->port, 1);
+@@ -1551,7 +1551,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+       unsigned short new_crctype;
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       switch (encoding)
+@@ -1647,7 +1647,7 @@ static int hdlcdev_open(struct net_device *dev)
+       /* arbitrate between network and tty opens */
+       spin_lock_irqsave(&info->netlock, flags);
+-      if (info->port.count != 0 || info->netcount != 0) {
++      if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
+               printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+               spin_unlock_irqrestore(&info->netlock, flags);
+               return -EBUSY;
+@@ -1733,7 +1733,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+               printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+       /* return error if TTY interface open */
+-      if (info->port.count)
++      if (atomic_read(&info->port.count))
+               return -EBUSY;
+       if (cmd != SIOCWANDEV)
+@@ -2610,7 +2610,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
+                * do not request bottom half processing if the
+                * device is not open in a normal mode.
+                */
+-              if ( port && (port->port.count || port->netcount) &&
++              if ( port && (atomic_read(&port->port.count) || port->netcount) &&
+                    port->pending_bh && !port->bh_running &&
+                    !port->bh_requested ) {
+                       if ( debug_level >= DEBUG_LEVEL_ISR )
+@@ -3300,10 +3300,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s block_til_ready() before block, count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+       spin_lock_irqsave(&info->lock, flags);
+-      port->count--;
++      atomic_dec(&port->count);
+       spin_unlock_irqrestore(&info->lock, flags);
+       port->blocked_open++;
+@@ -3330,7 +3330,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+               if (debug_level >= DEBUG_LEVEL_INFO)
+                       printk("%s(%d):%s block_til_ready() count=%d\n",
+-                               __FILE__,__LINE__, tty->driver->name, port->count );
++                               __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+               tty_unlock(tty);
+               schedule();
+@@ -3340,12 +3340,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(&port->open_wait, &wait);
+       if (!tty_hung_up_p(filp))
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       if (debug_level >= DEBUG_LEVEL_INFO)
+               printk("%s(%d):%s block_til_ready() after, count=%d\n",
+-                       __FILE__,__LINE__, tty->driver->name, port->count );
++                       __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+       if (!retval)
+               tty_port_set_active(port, 1);
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 52bbd27..7846d42 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -1090,7 +1090,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
+ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
+                                  size_t count, loff_t *ppos)
+ {
+-      if (count) {
++      if (count && capable(CAP_SYS_ADMIN)) {
+               char c;
+               if (get_user(c, buf))
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 734a635..0518bb7 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -105,6 +105,8 @@
+ #include <linux/kmod.h>
+ #include <linux/nsproxy.h>
++#include <linux/grsecurity.h>
++
+ #undef TTY_DEBUG_HANGUP
+ #ifdef TTY_DEBUG_HANGUP
+ # define tty_debug_hangup(tty, f, args...)    tty_debug(tty, f, ##args)
+@@ -2286,6 +2288,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
+       char ch, mbz = 0;
+       struct tty_ldisc *ld;
++      if (gr_handle_tiocsti(tty))
++              return -EPERM;
+       if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (get_user(ch, p))
+@@ -3560,7 +3564,7 @@ EXPORT_SYMBOL(tty_devnum);
+ void tty_default_fops(struct file_operations *fops)
+ {
+-      *fops = tty_fops;
++      memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
+ }
+ /*
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 68947f6..1f85fef2 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -68,7 +68,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
+       raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
+       tty_ldiscs[disc] = new_ldisc;
+       new_ldisc->num = disc;
+-      new_ldisc->refcount = 0;
++      atomic_set(&new_ldisc->refcount, 0);
+       raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
+       return ret;
+@@ -96,7 +96,7 @@ int tty_unregister_ldisc(int disc)
+               return -EINVAL;
+       raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
+-      if (tty_ldiscs[disc]->refcount)
++      if (atomic_read(&tty_ldiscs[disc]->refcount))
+               ret = -EBUSY;
+       else
+               tty_ldiscs[disc] = NULL;
+@@ -117,7 +117,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
+       if (ldops) {
+               ret = ERR_PTR(-EAGAIN);
+               if (try_module_get(ldops->owner)) {
+-                      ldops->refcount++;
++                      atomic_inc(&ldops->refcount);
+                       ret = ldops;
+               }
+       }
+@@ -130,7 +130,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+       unsigned long flags;
+       raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
+-      ldops->refcount--;
++      atomic_dec(&ldops->refcount);
+       module_put(ldops->owner);
+       raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
+ }
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index c3f9d93..f81070c 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
+       unsigned long flags;
+       spin_lock_irqsave(&port->lock, flags);
+-      port->count = 0;
++      atomic_set(&port->count, 0);
+       tty = port->tty;
+       if (tty)
+               set_bit(TTY_IO_ERROR, &tty->flags);
+@@ -388,7 +388,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+       /* The port lock protects the port counts */
+       spin_lock_irqsave(&port->lock, flags);
+-      port->count--;
++      atomic_dec(&port->count);
+       port->blocked_open++;
+       spin_unlock_irqrestore(&port->lock, flags);
+@@ -429,7 +429,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+          we must not mess that up further */
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
+-              port->count++;
++              atomic_inc(&port->count);
+       port->blocked_open--;
+       spin_unlock_irqrestore(&port->lock, flags);
+       if (retval == 0)
+@@ -462,18 +462,18 @@ int tty_port_close_start(struct tty_port *port,
+               return 0;
+       spin_lock_irqsave(&port->lock, flags);
+-      if (tty->count == 1 && port->count != 1) {
++      if (tty->count == 1 && atomic_read(&port->count) != 1) {
+               tty_warn(tty, "%s: tty->count = 1 port count = %d\n", __func__,
+-                       port->count);
+-              port->count = 1;
++                       atomic_read(&port->count));
++              atomic_set(&port->count, 1);
+       }
+-      if (--port->count < 0) {
++      if (atomic_dec_return(&port->count) < 0) {
+               tty_warn(tty, "%s: bad port count (%d)\n", __func__,
+-                       port->count);
+-              port->count = 0;
++                       atomic_read(&port->count));
++              atomic_set(&port->count, 0);
+       }
+-      if (port->count) {
++      if (atomic_read(&port->count)) {
+               spin_unlock_irqrestore(&port->lock, flags);
+               return 0;
+       }
+@@ -567,7 +567,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+                                                       struct file *filp)
+ {
+       spin_lock_irq(&port->lock);
+-      ++port->count;
++      atomic_inc(&port->count);
+       spin_unlock_irq(&port->lock);
+       tty_port_tty_set(port, tty);
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index 0f8caae..07939b5 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -630,6 +630,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
+            kbd->kbdmode == VC_OFF) &&
+            value != KVAL(K_SAK))
+               return;         /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++      {
++              void *func = fn_handler[value];
++              if (func == fn_show_state || func == fn_show_ptregs ||
++                  func == fn_show_mem)
++                      return;
++      }
++#endif
++
+       fn_handler[value](vc);
+ }
+@@ -1858,9 +1868,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+       if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+               return -EFAULT;
+-      if (!capable(CAP_SYS_TTY_CONFIG))
+-              perm = 0;
+-
+       switch (cmd) {
+       case KDGKBENT:
+               /* Ensure another thread doesn't free it under us */
+@@ -1875,6 +1882,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+               spin_unlock_irqrestore(&kbd_event_lock, flags);
+               return put_user(val, &user_kbe->kb_value);
+       case KDSKBENT:
++              if (!capable(CAP_SYS_TTY_CONFIG))
++                      perm = 0;
++
+               if (!perm)
+                       return -EPERM;
+               if (!i && v == K_NOSUCHMAP) {
+@@ -1965,9 +1975,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+       int i, j, k;
+       int ret;
+-      if (!capable(CAP_SYS_TTY_CONFIG))
+-              perm = 0;
+-
+       kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
+       if (!kbs) {
+               ret = -ENOMEM;
+@@ -2001,6 +2008,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+               kfree(kbs);
+               return ((p && *p) ? -EOVERFLOW : 0);
+       case KDSKBSENT:
++              if (!capable(CAP_SYS_TTY_CONFIG))
++                      perm = 0;
++
+               if (!perm) {
+                       ret = -EPERM;
+                       goto reterr;
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index fba021f..977a54e 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -25,6 +25,7 @@
+ #include <linux/kobject.h>
+ #include <linux/cdev.h>
+ #include <linux/uio_driver.h>
++#include <asm/local.h>
+ #define UIO_MAX_DEVICES               (1U << MINORBITS)
+@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+ {
+       struct uio_device *idev = dev_get_drvdata(dev);
+-      return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
++      return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
+ }
+ static DEVICE_ATTR_RO(event);
+@@ -401,7 +402,7 @@ void uio_event_notify(struct uio_info *info)
+ {
+       struct uio_device *idev = info->uio_dev;
+-      atomic_inc(&idev->event);
++      atomic_inc_unchecked(&idev->event);
+       wake_up_interruptible(&idev->wait);
+       kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+ }
+@@ -454,7 +455,7 @@ static int uio_open(struct inode *inode, struct file *filep)
+       }
+       listener->dev = idev;
+-      listener->event_count = atomic_read(&idev->event);
++      listener->event_count = atomic_read_unchecked(&idev->event);
+       filep->private_data = listener;
+       if (idev->info->open) {
+@@ -505,7 +506,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
+               return -EIO;
+       poll_wait(filep, &idev->wait, wait);
+-      if (listener->event_count != atomic_read(&idev->event))
++      if (listener->event_count != atomic_read_unchecked(&idev->event))
+               return POLLIN | POLLRDNORM;
+       return 0;
+ }
+@@ -530,7 +531,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
+       do {
+               set_current_state(TASK_INTERRUPTIBLE);
+-              event_count = atomic_read(&idev->event);
++              event_count = atomic_read_unchecked(&idev->event);
+               if (event_count != listener->event_count) {
+                       __set_current_state(TASK_RUNNING);
+                       if (copy_to_user(buf, &event_count, count))
+@@ -588,9 +589,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
+ static int uio_find_mem_index(struct vm_area_struct *vma)
+ {
+       struct uio_device *idev = vma->vm_private_data;
++      unsigned long size;
+       if (vma->vm_pgoff < MAX_UIO_MAPS) {
+-              if (idev->info->mem[vma->vm_pgoff].size == 0)
++              size = idev->info->mem[vma->vm_pgoff].size;
++              if (size == 0)
++                      return -1;
++              if (vma->vm_end - vma->vm_start > size)
+                       return -1;
+               return (int)vma->vm_pgoff;
+       }
+@@ -822,7 +827,7 @@ int __uio_register_device(struct module *owner,
+       idev->owner = owner;
+       idev->info = info;
+       init_waitqueue_head(&idev->wait);
+-      atomic_set(&idev->event, 0);
++      atomic_set_unchecked(&idev->event, 0);
+       ret = uio_get_minor(idev);
+       if (ret)
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 0a866e9..e0c35aa 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -474,7 +474,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+               ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+               if (ret < 2)
+                       return -EINVAL;
+-              if (index < 0 || index > 0x7f)
++              if (index > 0x7f)
+                       return -EINVAL;
+               if (tmp < 0 || tmp > len - pos)
+                       return -EINVAL;
+diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
+index db322d9..f0f4bc1 100644
+--- a/drivers/usb/atm/usbatm.c
++++ b/drivers/usb/atm/usbatm.c
+@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               if (printk_ratelimit())
+                       atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+                               __func__, vpi, vci);
+-              atomic_inc(&vcc->stats->rx_err);
++              atomic_inc_unchecked(&vcc->stats->rx_err);
+               return;
+       }
+@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               if (length > ATM_MAX_AAL5_PDU) {
+                       atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+                                 __func__, length, vcc);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto out;
+               }
+@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               if (sarb->len < pdu_length) {
+                       atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+                                 __func__, pdu_length, sarb->len, vcc);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto out;
+               }
+               if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+                       atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+                                 __func__, vcc);
+-                      atomic_inc(&vcc->stats->rx_err);
++                      atomic_inc_unchecked(&vcc->stats->rx_err);
+                       goto out;
+               }
+@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+                       if (printk_ratelimit())
+                               atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+                                       __func__, length);
+-                      atomic_inc(&vcc->stats->rx_drop);
++                      atomic_inc_unchecked(&vcc->stats->rx_drop);
+                       goto out;
+               }
+@@ -415,7 +415,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+               vcc->push(vcc, skb);
+-              atomic_inc(&vcc->stats->rx);
++              atomic_inc_unchecked(&vcc->stats->rx);
+       out:
+               skb_trim(sarb, 0);
+       }
+@@ -613,7 +613,7 @@ static void usbatm_tx_process(unsigned long data)
+                       struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+                       usbatm_pop(vcc, skb);
+-                      atomic_inc(&vcc->stats->tx);
++                      atomic_inc_unchecked(&vcc->stats->tx);
+                       skb = skb_dequeue(&instance->sndqueue);
+               }
+@@ -757,11 +757,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
+       if (!left--)
+               return sprintf(page,
+                              "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+-                             atomic_read(&atm_dev->stats.aal5.tx),
+-                             atomic_read(&atm_dev->stats.aal5.tx_err),
+-                             atomic_read(&atm_dev->stats.aal5.rx),
+-                             atomic_read(&atm_dev->stats.aal5.rx_err),
+-                             atomic_read(&atm_dev->stats.aal5.rx_drop));
++                             atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++                             atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+       if (!left--) {
+               if (instance->disconnected)
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index ef04b50..7582d99 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -119,7 +119,7 @@ static const char format_endpt[] =
+  * time it gets called.
+  */
+ static struct device_connect_event {
+-      atomic_t count;
++      atomic_unchecked_t count;
+       wait_queue_head_t wait;
+ } device_event = {
+       .count = ATOMIC_INIT(1),
+@@ -157,7 +157,7 @@ static const struct class_info clas_info[] = {
+ void usbfs_conn_disc_event(void)
+ {
+-      atomic_add(2, &device_event.count);
++      atomic_add_unchecked(2, &device_event.count);
+       wake_up(&device_event.wait);
+ }
+@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
+       poll_wait(file, &device_event.wait, wait);
+-      event_count = atomic_read(&device_event.count);
++      event_count = atomic_read_unchecked(&device_event.count);
+       if (file->f_version != event_count) {
+               file->f_version = event_count;
+               return POLLIN | POLLRDNORM;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 09c8d9c..14ee687 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -290,7 +290,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
+       struct usb_dev_state *ps = file->private_data;
+       struct usb_device *dev = ps->dev;
+       ssize_t ret = 0;
+-      unsigned len;
++      size_t len;
+       loff_t pos;
+       int i;
+@@ -332,22 +332,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
+       for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
+               struct usb_config_descriptor *config =
+                       (struct usb_config_descriptor *)dev->rawdescriptors[i];
+-              unsigned int length = le16_to_cpu(config->wTotalLength);
++              size_t length = le16_to_cpu(config->wTotalLength);
+               if (*ppos < pos + length) {
+                       /* The descriptor may claim to be longer than it
+                        * really is.  Here is the actual allocated length. */
+-                      unsigned alloclen =
++                      size_t alloclen =
+                               le16_to_cpu(dev->config[i].desc.wTotalLength);
+-                      len = length - (*ppos - pos);
++                      len = length + pos - *ppos;
+                       if (len > nbytes)
+                               len = nbytes;
+                       /* Simply don't write (skip over) unallocated parts */
+                       if (alloclen > (*ppos - pos)) {
+-                              alloclen -= (*ppos - pos);
++                              alloclen = alloclen + pos - *ppos;
+                               if (copy_to_user(buf,
+                                   dev->rawdescriptors[i] + (*ppos - pos),
+                                   min(len, alloclen))) {
+@@ -1682,7 +1682,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+               }
+       }
+       as->urb->dev = ps->dev;
+-      as->urb->pipe = (uurb->type << 30) |
++      as->urb->pipe = ((unsigned int)uurb->type << 30) |
+                       __create_pipe(ps->dev, uurb->endpoint & 0xf) |
+                       (uurb->endpoint & USB_DIR_IN);
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d2e3f65..e389998 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1630,7 +1630,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+        */
+       usb_get_urb(urb);
+       atomic_inc(&urb->use_count);
+-      atomic_inc(&urb->dev->urbnum);
++      atomic_inc_unchecked(&urb->dev->urbnum);
+       usbmon_urb_submit(&hcd->self, urb);
+       /* NOTE requirements on root-hub callers (usbfs and the hub
+@@ -1657,7 +1657,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+               urb->hcpriv = NULL;
+               INIT_LIST_HEAD(&urb->urb_list);
+               atomic_dec(&urb->use_count);
+-              atomic_dec(&urb->dev->urbnum);
++              atomic_dec_unchecked(&urb->dev->urbnum);
+               if (atomic_read(&urb->reject))
+                       wake_up(&usb_kill_urb_queue);
+               usb_put_urb(urb);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 1d5fc32..7dc3bd4 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -26,6 +26,7 @@
+ #include <linux/mutex.h>
+ #include <linux/random.h>
+ #include <linux/pm_qos.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -4785,6 +4786,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+                       goto done;
+               return;
+       }
++
++      if (gr_handle_new_usb())
++              goto done;
++
+       if (hub_is_superspeed(hub->hdev))
+               unit_load = 150;
+       else
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index c953a0f..54c64f4 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -259,7 +259,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
+       struct usb_device *udev;
+       udev = to_usb_device(dev);
+-      return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
++      return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
+ }
+ static DEVICE_ATTR_RO(urbnum);
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 5e80697..1e91073 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -444,7 +444,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+       set_dev_node(&dev->dev, dev_to_node(bus->controller));
+       dev->state = USB_STATE_ATTACHED;
+       dev->lpm_disable_count = 1;
+-      atomic_set(&dev->urbnum, 0);
++      atomic_set_unchecked(&dev->urbnum, 0);
+       INIT_LIST_HEAD(&dev->ep0.urb_list);
+       dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 12731e6..0391d02 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
+ #ifdef CONFIG_KGDB
+ static struct kgdb_io kgdbdbgp_io_ops;
+-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
++static struct kgdb_io kgdbdbgp_io_ops_console;
++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
+ #else
+ #define dbgp_kgdb_mode (0)
+ #endif
+@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
+       .write_char = kgdbdbgp_write_char,
+ };
++static struct kgdb_io kgdbdbgp_io_ops_console = {
++      .name = "kgdbdbgp",
++      .read_char = kgdbdbgp_read_char,
++      .write_char = kgdbdbgp_write_char,
++      .is_console = 1
++};
++
+ static int kgdbdbgp_wait_time;
+ static int __init kgdbdbgp_parse_config(char *str)
+@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
+               ptr++;
+               kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+       }
+-      kgdb_register_io_module(&kgdbdbgp_io_ops);
+-      kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
++      if (early_dbgp_console.index != -1)
++              kgdb_register_io_module(&kgdbdbgp_io_ops_console);
++      else
++              kgdb_register_io_module(&kgdbdbgp_io_ops);
+       return 0;
+ }
+diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
+index 0473d61..5e9caa5 100644
+--- a/drivers/usb/gadget/function/f_phonet.c
++++ b/drivers/usb/gadget/function/f_phonet.c
+@@ -223,7 +223,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
+       netif_wake_queue(dev);
+ }
+-static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct phonet_port *port = netdev_priv(dev);
+       struct f_phonet *fp;
+diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
+index f2ac0cb..4038262 100644
+--- a/drivers/usb/gadget/function/f_uac1.c
++++ b/drivers/usb/gadget/function/f_uac1.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/atomic.h>
++#include <linux/module.h>
+ #include "u_uac1.h"
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index e0cd1e4..0a41c55 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -752,9 +752,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
+                       spin_lock_irq(&port->port_lock);
+                       /* already open?  Great. */
+-                      if (port->port.count) {
++                      if (atomic_read(&port->port.count)) {
+                               status = 0;
+-                              port->port.count++;
++                              atomic_inc(&port->port.count);
+                       /* currently opening/closing? wait ... */
+                       } else if (port->openclose) {
+@@ -813,7 +813,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
+       tty->driver_data = port;
+       port->port.tty = tty;
+-      port->port.count = 1;
++      atomic_set(&port->port.count, 1);
+       port->openclose = false;
+       /* if connected, start the I/O stream */
+@@ -855,11 +855,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
+       spin_lock_irq(&port->port_lock);
+-      if (port->port.count != 1) {
+-              if (port->port.count == 0)
++      if (atomic_read(&port->port.count) != 1) {
++              if (atomic_read(&port->port.count) == 0)
+                       WARN_ON(1);
+               else
+-                      --port->port.count;
++                      atomic_dec(&port->port.count);
+               goto exit;
+       }
+@@ -869,7 +869,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
+        * and sleep if necessary
+        */
+       port->openclose = true;
+-      port->port.count = 0;
++      atomic_set(&port->port.count, 0);
+       gser = port->port_usb;
+       if (gser && gser->disconnect)
+@@ -1324,7 +1324,7 @@ static int gs_closed(struct gs_port *port)
+       int cond;
+       spin_lock_irq(&port->port_lock);
+-      cond = (port->port.count == 0) && !port->openclose;
++      cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
+       spin_unlock_irq(&port->port_lock);
+       return cond;
+ }
+@@ -1469,7 +1469,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
+       /* if it's already open, start I/O ... and notify the serial
+        * protocol about open/close status (connect/disconnect).
+        */
+-      if (port->port.count) {
++      if (atomic_read(&port->port.count)) {
+               pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
+               gs_start_io(port);
+               if (gser->connect)
+@@ -1516,7 +1516,7 @@ void gserial_disconnect(struct gserial *gser)
+       port->port_usb = NULL;
+       gser->ioport = NULL;
+-      if (port->port.count > 0 || port->openclose) {
++      if (atomic_read(&port->port.count) > 0 || port->openclose) {
+               wake_up_interruptible(&port->drain_wait);
+               if (port->port.tty)
+                       tty_hangup(port->port.tty);
+@@ -1529,7 +1529,7 @@ void gserial_disconnect(struct gserial *gser)
+       /* finally, free any unused/unusable I/O buffers */
+       spin_lock_irqsave(&port->port_lock, flags);
+-      if (port->port.count == 0 && !port->openclose)
++      if (atomic_read(&port->port.count) == 0 && !port->openclose)
+               gs_buf_free(&port->port_write_buf);
+       gs_free_requests(gser->out, &port->read_pool, NULL);
+       gs_free_requests(gser->out, &port->read_queue, NULL);
+diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
+index c78c841..48fd281 100644
+--- a/drivers/usb/gadget/function/u_uac1.c
++++ b/drivers/usb/gadget/function/u_uac1.c
+@@ -17,6 +17,7 @@
+ #include <linux/ctype.h>
+ #include <linux/random.h>
+ #include <linux/syscalls.h>
++#include <linux/module.h>
+ #include "u_uac1.h"
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 77d0790..d123802 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -2458,7 +2458,7 @@ static int dummy_setup(struct usb_hcd *hcd)
+       struct dummy *dum;
+       dum = *((void **)dev_get_platdata(hcd->self.controller));
+-      hcd->self.sg_tablesize = ~0;
++      hcd->self.sg_tablesize = SG_ALL;
+       if (usb_hcd_is_primary_hcd(hcd)) {
+               dum->hs_hcd = hcd_to_dummy_hcd(hcd);
+               dum->hs_hcd->dum = dum;
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 1e5f529..5832376 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -573,7 +573,7 @@ static int ehci_init(struct usb_hcd *hcd)
+       /* Accept arbitrarily long scatter-gather lists */
+       if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+-              hcd->self.sg_tablesize = ~0;
++              hcd->self.sg_tablesize = SG_ALL;
+       /* Prepare for unlinking active QHs */
+       ehci->old_current = ~0;
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 74f62d6..459983a 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -777,7 +777,7 @@ static struct urb *request_single_step_set_feature_urb(
+       urb->transfer_flags = URB_DIR_IN;
+       usb_get_urb(urb);
+       atomic_inc(&urb->use_count);
+-      atomic_inc(&urb->dev->urbnum);
++      atomic_inc_unchecked(&urb->dev->urbnum);
+       urb->setup_dma = dma_map_single(
+                       hcd->self.controller,
+                       urb->setup_packet,
+@@ -844,7 +844,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+       urb->status = -EINPROGRESS;
+       usb_get_urb(urb);
+       atomic_inc(&urb->use_count);
+-      atomic_inc(&urb->dev->urbnum);
++      atomic_inc_unchecked(&urb->dev->urbnum);
+       retval = submit_single_step_set_feature(hcd, urb, 0);
+       if (!retval && !wait_for_completion_timeout(&done,
+                                               msecs_to_jiffies(2000))) {
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index eca3710..eca7127 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -44,9 +44,9 @@
+ static int
+ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
+-                size_t len, int token, int maxpacket)
++                size_t len, u32 token, int maxpacket)
+ {
+-      int     i, count;
++      u32     i, count;
+       u64     addr = buf;
+       /* one buffer entry per 4K ... first might be short or unaligned */
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 66efa9a..50b719d 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -5025,7 +5025,7 @@ static int hcd_fotg210_init(struct usb_hcd *hcd)
+       /* Accept arbitrarily long scatter-gather lists */
+       if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+-              hcd->self.sg_tablesize = ~0;
++              hcd->self.sg_tablesize = SG_ALL;
+       return 0;
+ }
+diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
+index 1db0626..2e9f5ea 100644
+--- a/drivers/usb/host/hwa-hc.c
++++ b/drivers/usb/host/hwa-hc.c
+@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
+       struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+       struct wahc *wa = &hwahc->wa;
+       struct device *dev = &wa->usb_iface->dev;
+-      u8 mas_le[UWB_NUM_MAS/8];
++      u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
++
++      if (mas_le == NULL)
++              return -ENOMEM;
+       /* Set the stream index */
+       result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
+                       WUSB_REQ_SET_WUSB_MAS,
+                       USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+-                      mas_le, 32, USB_CTRL_SET_TIMEOUT);
++                      mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
+       if (result < 0)
+               dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
+ out:
++      kfree(mas_le);
++
+       return result;
+ }
+@@ -812,7 +817,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
+               goto error_alloc;
+       }
+       usb_hcd->wireless = 1;
+-      usb_hcd->self.sg_tablesize = ~0;
++      usb_hcd->self.sg_tablesize = SG_ALL;
+       wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+       hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+       hwahc_init(hwahc);
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 1700908..3b49b2e 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -444,7 +444,7 @@ static int ohci_init (struct ohci_hcd *ohci)
+       struct usb_hcd *hcd = ohci_to_hcd(ohci);
+       /* Accept arbitrarily long scatter-gather lists */
+-      hcd->self.sg_tablesize = ~0;
++      hcd->self.sg_tablesize = SG_ALL;
+       if (distrust_firmware)
+               ohci->flags |= OHCI_QUIRK_HUB_POWER;
+diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
+index 672cea3..31a730db 100644
+--- a/drivers/usb/host/r8a66597.h
++++ b/drivers/usb/host/r8a66597.h
+@@ -125,7 +125,7 @@ struct r8a66597 {
+       unsigned short interval_map;
+       unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE];
+       unsigned char dma_map;
+-      unsigned int max_root_hub;
++      unsigned char max_root_hub;
+       struct list_head child_device;
+       unsigned long child_connect_map[4];
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index a7de8e8..e1ef134 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -570,7 +570,7 @@ static int uhci_start(struct usb_hcd *hcd)
+       hcd->uses_new_polling = 1;
+       /* Accept arbitrarily long scatter-gather lists */
+       if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+-              hcd->self.sg_tablesize = ~0;
++              hcd->self.sg_tablesize = SG_ALL;
+       spin_lock_init(&uhci->lock);
+       setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index d7b0f97..378d99d 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -32,7 +32,7 @@
+ #define SSIC_PORT_CFG2                0x880c
+ #define SSIC_PORT_CFG2_OFFSET 0x30
+ #define PROG_DONE             (1 << 30)
+-#define SSIC_PORT_UNUSED      (1 << 31)
++#define SSIC_PORT_UNUSED      (1U << 31)
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC    0x1b73
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 797137e..b7be2b3 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1889,9 +1889,9 @@ td_cleanup:
+        * unsigned).  Play it safe and say we didn't transfer anything.
+        */
+       if (urb->actual_length > urb->transfer_buffer_length) {
+-              xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
++              xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, trans. len = %u\n",
+                       urb->transfer_buffer_length,
+-                      urb->actual_length);
++                      EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+               urb->actual_length = 0;
+               if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                       *status = -EREMOTEIO;
+@@ -1970,10 +1970,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
+       case COMP_STOP:
+               /* Did we stop at data stage? */
+-              if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
+-                      td->urb->actual_length =
+-                              td->urb->transfer_buffer_length -
+-                              EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
++              if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) {
++                      if (td->urb->transfer_buffer_length >= EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)))
++                              td->urb->actual_length =
++                                      td->urb->transfer_buffer_length -
++                                      EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
++                      else
++                              td->urb->actual_length =
++                                      td->urb->transfer_buffer_length + 1;
++              }
+               /* fall through */
+       case COMP_STOP_INVAL:
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
+@@ -1987,12 +1992,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+               /* else fall through */
+       case COMP_STALL:
+               /* Did we transfer part of the data (middle) phase? */
+-              if (event_trb != ep_ring->dequeue &&
+-                              event_trb != td->last_trb)
+-                      td->urb->actual_length =
+-                              td->urb->transfer_buffer_length -
+-                              EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+-              else if (!td->urb_length_set)
++              if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) {
++                      if (td->urb->transfer_buffer_length >= EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)))
++                              td->urb->actual_length =
++                                      td->urb->transfer_buffer_length -
++                                      EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
++                      else
++                              td->urb->actual_length =
++                                      td->urb->transfer_buffer_length + 1;
++              } else if (!td->urb_length_set)
+                       td->urb->actual_length = 0;
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
+@@ -2025,9 +2033,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+                        * the last TRB.
+                        */
+                       td->urb_length_set = true;
+-                      td->urb->actual_length =
+-                              td->urb->transfer_buffer_length -
+-                              EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
++                      if (td->urb->transfer_buffer_length >= EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)))
++                              td->urb->actual_length =
++                                      td->urb->transfer_buffer_length -
++                                      EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
++                      else
++                              BUG();
+                       xhci_dbg(xhci, "Waiting for status "
+                                       "stage event\n");
+                       return 0;
+@@ -2222,11 +2233,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+       /* Fast path - was this the last TRB in the TD for this URB? */
+       } else if (event_trb == td->last_trb) {
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+-                      td->urb->actual_length =
+-                              td->urb->transfer_buffer_length -
+-                              EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+-                      if (td->urb->transfer_buffer_length <
+-                                      td->urb->actual_length) {
++                      if (td->urb->transfer_buffer_length < EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))) {
+                               xhci_warn(xhci, "HC gave bad length "
+                                               "of %d bytes left\n",
+                                         EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+@@ -2235,7 +2242,10 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+                                       *status = -EREMOTEIO;
+                               else
+                                       *status = 0;
+-                      }
++                      } else
++                              td->urb->actual_length =
++                                      td->urb->transfer_buffer_length -
++                                      EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+                       /* Don't overwrite a previously set error code */
+                       if (*status == -EINPROGRESS) {
+                               if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 01d96c9..63270ff 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4838,7 +4838,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+       int                     retval;
+       /* Accept arbitrarily long scatter-gather lists */
+-      hcd->self.sg_tablesize = ~0;
++      hcd->self.sg_tablesize = SG_ALL;
+       /* support to build packet from discontinuous buffers */
+       hcd->self.no_sg_constraint = 1;
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index a0a3827..d7ec10b 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -84,7 +84,7 @@ struct appledisplay {
+       struct mutex sysfslock;         /* concurrent read and write */
+ };
+-static atomic_t count_displays = ATOMIC_INIT(0);
++static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
+ static struct workqueue_struct *wq;
+ static void appledisplay_complete(struct urb *urb)
+@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
+       /* Register backlight device */
+       snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
+-              atomic_inc_return(&count_displays) - 1);
++              atomic_inc_return_unchecked(&count_displays) - 1);
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.type = BACKLIGHT_RAW;
+       props.max_brightness = 0xff;
+diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
+index 460cebf..eb16bb4 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
++++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
+@@ -1368,29 +1368,77 @@ static void sisusbdummycon_init(struct vc_data *vc, int init)
+       vc_resize(vc, 80, 25);
+ }
+-static int sisusbdummycon_dummy(void)
++static void sisusb_con_deinit(struct vc_data *a)
+ {
+-    return 0;
+ }
+-#define SISUSBCONDUMMY        (void *)sisusbdummycon_dummy
++static void sisusb_con_clear(struct vc_data *a, int b, int c, int d, int e)
++{
++}
++
++static void sisusb_con_putc(struct vc_data *a, int b, int c, int d)
++{
++}
++
++static void sisusb_con_putcs(struct vc_data *a, const unsigned short *b, int c, int d, int e)
++{
++}
++
++static void sisusb_con_cursor(struct vc_data *a, int b)
++{
++}
++
++static int sisusb_con_scroll(struct vc_data *a, int b, int c, int d, int e)
++{
++      return 0;
++}
++
++static int sisusb_con_switch(struct vc_data *a)
++{
++      return 0;
++}
++
++static int sisusb_con_blank(struct vc_data *a, int b, int c)
++{
++      return 0;
++}
++
++static int sisusb_con_font_set(struct vc_data *a, struct console_font *b, unsigned c)
++{
++      return 0;
++}
++
++static int sisusb_con_font_get(struct vc_data *a, struct console_font *b)
++{
++      return 0;
++}
++
++static int sisusb_con_font_default(struct vc_data *a, struct console_font *b, char *c)
++{
++      return 0;
++}
++
++static int sisusb_con_font_copy(struct vc_data *a, int b)
++{
++      return 0;
++}
+ static const struct consw sisusb_dummy_con = {
+       .owner =                THIS_MODULE,
+       .con_startup =          sisusbdummycon_startup,
+       .con_init =             sisusbdummycon_init,
+-      .con_deinit =           SISUSBCONDUMMY,
+-      .con_clear =            SISUSBCONDUMMY,
+-      .con_putc =             SISUSBCONDUMMY,
+-      .con_putcs =            SISUSBCONDUMMY,
+-      .con_cursor =           SISUSBCONDUMMY,
+-      .con_scroll =           SISUSBCONDUMMY,
+-      .con_switch =           SISUSBCONDUMMY,
+-      .con_blank =            SISUSBCONDUMMY,
+-      .con_font_set =         SISUSBCONDUMMY,
+-      .con_font_get =         SISUSBCONDUMMY,
+-      .con_font_default =     SISUSBCONDUMMY,
+-      .con_font_copy =        SISUSBCONDUMMY,
++      .con_deinit =           sisusb_con_deinit,
++      .con_clear =            sisusb_con_clear,
++      .con_putc =             sisusb_con_putc,
++      .con_putcs =            sisusb_con_putcs,
++      .con_cursor =           sisusb_con_cursor,
++      .con_scroll =           sisusb_con_scroll,
++      .con_switch =           sisusb_con_switch,
++      .con_blank =            sisusb_con_blank,
++      .con_font_set =         sisusb_con_font_set,
++      .con_font_get =         sisusb_con_font_get,
++      .con_font_default =     sisusb_con_font_default,
++      .con_font_copy =        sisusb_con_font_copy,
+ };
+ int
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index 8967715..4a3791b 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
+       info->port = port;
+-      ++port->port.count;
++      atomic_inc(&port->port.count);
+       if (!tty_port_initialized(&port->port)) {
+               if (serial->type->set_termios) {
+                       /*
+@@ -172,7 +172,7 @@ static int usb_console_setup(struct console *co, char *options)
+       }
+       /* Now that any required fake tty operations are completed restore
+        * the tty port count */
+-      --port->port.count;
++      atomic_dec(&port->port.count);
+       /* The console is special in terms of closing the device so
+        * indicate this port is now acting as a system console. */
+       port->port.console = 1;
+@@ -184,7 +184,7 @@ static int usb_console_setup(struct console *co, char *options)
+       tty_port_tty_set(&port->port, NULL);
+       tty_kref_put(tty);
+  reset_open_count:
+-      port->port.count = 0;
++      atomic_set(&port->port.count, 0);
+       usb_autopm_put_interface(serial->interface);
+  error_get_interface:
+       usb_serial_put(serial);
+@@ -195,7 +195,7 @@ static int usb_console_setup(struct console *co, char *options)
+ static void usb_console_write(struct console *co,
+                                       const char *buf, unsigned count)
+ {
+-      static struct usbcons_info *info = &usbcons_info;
++      struct usbcons_info *info = &usbcons_info;
+       struct usb_serial_port *port = info->port;
+       struct usb_serial *serial;
+       int retval = -ENODEV;
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index ffd0867..eb28464 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -709,7 +709,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+       if (need_auto_sense) {
+               int temp_result;
+               struct scsi_eh_save ses;
+-              int sense_size = US_SENSE_SIZE;
++              unsigned int sense_size = US_SENSE_SIZE;
+               struct scsi_sense_hdr sshdr;
+               const u8 *scdd;
+               u8 fm_ili;
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 8c5f011..05e59a2 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -942,7 +942,7 @@ static void usb_stor_scan_dwork(struct work_struct *work)
+       clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
+ }
+-static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
++static unsigned short usb_stor_sg_tablesize(struct usb_interface *intf)
+ {
+       struct usb_device *usb_dev = interface_to_usbdev(intf);
+diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
+index 8fae28b..8b4bfec 100644
+--- a/drivers/usb/storage/usb.h
++++ b/drivers/usb/storage/usb.h
+@@ -64,7 +64,7 @@ struct us_unusual_dev {
+       __u8  useProtocol;
+       __u8  useTransport;
+       int (*initFunction)(struct us_data *);
+-};
++} __do_const;
+ /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
+diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
+index a863a98..d272795 100644
+--- a/drivers/usb/usbip/vhci.h
++++ b/drivers/usb/usbip/vhci.h
+@@ -83,7 +83,7 @@ struct vhci_hcd {
+       unsigned resuming:1;
+       unsigned long re_timeout;
+-      atomic_t seqnum;
++      atomic_unchecked_t seqnum;
+       /*
+        * NOTE:
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 2e0450b..6ebf0f6 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -447,7 +447,7 @@ static void vhci_tx_urb(struct urb *urb)
+       spin_lock_irqsave(&vdev->priv_lock, flags);
+-      priv->seqnum = atomic_inc_return(&the_controller->seqnum);
++      priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+       if (priv->seqnum == 0xffff)
+               dev_info(&urb->dev->dev, "seqnum max\n");
+@@ -696,7 +696,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+                       return -ENOMEM;
+               }
+-              unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
++              unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+               if (unlink->seqnum == 0xffff)
+                       pr_info("seqnum max\n");
+@@ -904,7 +904,7 @@ static int vhci_start(struct usb_hcd *hcd)
+               vdev->rhport = rhport;
+       }
+-      atomic_set(&vhci->seqnum, 0);
++      atomic_set_unchecked(&vhci->seqnum, 0);
+       spin_lock_init(&vhci->lock);
+       hcd->power_budget = 0; /* no limit */
+diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
+index d656e0e..466853e 100644
+--- a/drivers/usb/usbip/vhci_rx.c
++++ b/drivers/usb/usbip/vhci_rx.c
+@@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+       if (!urb) {
+               pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
+               pr_info("max seqnum %d\n",
+-                      atomic_read(&the_controller->seqnum));
++                      atomic_read_unchecked(&the_controller->seqnum));
+               usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+               return;
+       }
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index 5b5462e..fac23a0 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -60,7 +60,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+               if (vdev->ud.status == VDEV_ST_USED) {
+                       out += sprintf(out, "%03u %08x ",
+                                      vdev->speed, vdev->devid);
+-                      out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
++                      out += sprintf(out, "%16pK ", vdev->ud.tcp_socket);
+                       out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
+               } else {
+diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
+index e429b59..e0840c6 100644
+--- a/drivers/usb/usbip/vudc_rx.c
++++ b/drivers/usb/usbip/vudc_rx.c
+@@ -142,7 +142,7 @@ static int v_recv_cmd_submit(struct vudc *udc,
+       urb_p->urb->status = -EINPROGRESS;
+       /* FIXME: more pipe setup to please usbip_common */
+-      urb_p->urb->pipe &= ~(3 << 30);
++      urb_p->urb->pipe &= ~(3U << 30);
+       switch (urb_p->ep->type) {
+       case USB_ENDPOINT_XFER_BULK:
+               urb_p->urb->pipe |= (PIPE_BULK << 30);
+diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
+index edc7267..9f65ce2 100644
+--- a/drivers/usb/wusbcore/wa-hc.h
++++ b/drivers/usb/wusbcore/wa-hc.h
+@@ -240,7 +240,7 @@ struct wahc {
+       spinlock_t xfer_list_lock;
+       struct work_struct xfer_enqueue_work;
+       struct work_struct xfer_error_work;
+-      atomic_t xfer_id_count;
++      atomic_unchecked_t xfer_id_count;
+       kernel_ulong_t  quirks;
+ };
+@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
+       INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+       INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
+       wa->dto_in_use = 0;
+-      atomic_set(&wa->xfer_id_count, 1);
++      atomic_set_unchecked(&wa->xfer_id_count, 1);
+       /* init the buf in URBs */
+       for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+               usb_init_urb(&(wa->buf_in_urbs[index]));
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 69af4fd..da390d7 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
+  */
+ static void wa_xfer_id_init(struct wa_xfer *xfer)
+ {
+-      xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
++      xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
+ }
+ /* Return the xfer's ID. */
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index d624a52..7017191 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1283,7 +1283,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
+ }
+ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+-                                                pci_channel_state_t state)
++                                                enum pci_channel_state state)
+ {
+       struct vfio_pci_device *vdev;
+       struct vfio_device *device;
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 3bb02c6..a01ff38 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
+ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
+ {
+       __virtio16 v = 0;
+-      int rc = get_user(v, (__force __virtio16 __user *)p);
++      int rc = get_user(v, (__force_user __virtio16 *)p);
+       *val = vringh16_to_cpu(vrh, v);
+       return rc;
+ }
+@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
+ static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
+ {
+       __virtio16 v = cpu_to_vringh16(vrh, val);
+-      return put_user(v, (__force __virtio16 __user *)p);
++      return put_user(v, (__force_user __virtio16 *)p);
+ }
+ static inline int copydesc_user(void *dst, const void *src, size_t len)
+ {
+-      return copy_from_user(dst, (__force void __user *)src, len) ?
++      return copy_from_user(dst, (void __force_user *)src, len) ?
+               -EFAULT : 0;
+ }
+@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
+                              const struct vring_used_elem *src,
+                              unsigned int num)
+ {
+-      return copy_to_user((__force void __user *)dst, src,
++      return copy_to_user((void __force_user *)dst, src,
+                           sizeof(*dst) * num) ? -EFAULT : 0;
+ }
+ static inline int xfer_from_user(void *src, void *dst, size_t len)
+ {
+-      return copy_from_user(dst, (__force void __user *)src, len) ?
++      return copy_from_user(dst, (void __force_user *)src, len) ?
+               -EFAULT : 0;
+ }
+ static inline int xfer_to_user(void *dst, void *src, size_t len)
+ {
+-      return copy_to_user((__force void __user *)dst, src, len) ?
++      return copy_to_user((void __force_user *)dst, src, len) ?
+               -EFAULT : 0;
+ }
+@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
+       vrh->last_used_idx = 0;
+       vrh->vring.num = num;
+       /* vring expects kernel addresses, but only used via accessors. */
+-      vrh->vring.desc = (__force struct vring_desc *)desc;
+-      vrh->vring.avail = (__force struct vring_avail *)avail;
+-      vrh->vring.used = (__force struct vring_used *)used;
++      vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
++      vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
++      vrh->vring.used = (__force_kernel struct vring_used *)used;
+       return 0;
+ }
+ EXPORT_SYMBOL(vringh_init_user);
+@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
+ static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
+ {
+-      ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
++      ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
+       return 0;
+ }
+diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
+index 84a110a..96312c3 100644
+--- a/drivers/video/backlight/kb3886_bl.c
++++ b/drivers/video/backlight/kb3886_bl.c
+@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
+ static unsigned long kb3886bl_flags;
+ #define KB3886BL_SUSPENDED     0x01
+-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
++static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
+       {
+               .ident = "Sahara Touch-iT",
+               .matches = {
+diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
+index 9269d56..78d2a06 100644
+--- a/drivers/video/console/dummycon.c
++++ b/drivers/video/console/dummycon.c
+@@ -41,12 +41,60 @@ static void dummycon_init(struct vc_data *vc, int init)
+       vc_resize(vc, DUMMY_COLUMNS, DUMMY_ROWS);
+ }
+-static int dummycon_dummy(void)
++static void dummycon_deinit(struct vc_data *a)
++{
++}
++
++static void dummycon_clear(struct vc_data *a, int b, int c, int d, int e)
++{
++}
++
++static void dummycon_putc(struct vc_data *a, int b, int c, int d)
++{
++}
++
++static void dummycon_putcs(struct vc_data *a, const unsigned short *b, int c, int d, int e)
++{
++}
++
++static void dummycon_cursor(struct vc_data *a, int b)
++{
++}
++
++static int dummycon_scroll(struct vc_data *a, int b, int c, int d, int e)
++{
++    return 0;
++}
++
++static int dummycon_switch(struct vc_data *a)
+ {
+     return 0;
+ }
+-#define DUMMY (void *)dummycon_dummy
++static int dummycon_blank(struct vc_data *a, int b, int c)
++{
++    return 0;
++}
++
++static int dummycon_font_set(struct vc_data *a, struct console_font *b, unsigned c)
++{
++    return 0;
++}
++
++static int dummycon_font_get(struct vc_data *a, struct console_font *b)
++{
++    return 0;
++}
++
++static int dummycon_font_default(struct vc_data *a, struct console_font *b , char *c)
++{
++    return 0;
++}
++
++static int dummycon_font_copy(struct vc_data *a, int b)
++{
++    return 0;
++}
+ /*
+  *  The console `switch' structure for the dummy console
+@@ -58,17 +106,17 @@ const struct consw dummy_con = {
+     .owner =          THIS_MODULE,
+     .con_startup =    dummycon_startup,
+     .con_init =               dummycon_init,
+-    .con_deinit =     DUMMY,
+-    .con_clear =      DUMMY,
+-    .con_putc =               DUMMY,
+-    .con_putcs =      DUMMY,
+-    .con_cursor =     DUMMY,
+-    .con_scroll =     DUMMY,
+-    .con_switch =     DUMMY,
+-    .con_blank =      DUMMY,
+-    .con_font_set =   DUMMY,
+-    .con_font_get =   DUMMY,
+-    .con_font_default =       DUMMY,
+-    .con_font_copy =  DUMMY,
++    .con_deinit =     dummycon_deinit,
++    .con_clear =      dummycon_clear,
++    .con_putc =               dummycon_putc,
++    .con_putcs =      dummycon_putcs,
++    .con_cursor =     dummycon_cursor,
++    .con_scroll =     dummycon_scroll,
++    .con_switch =     dummycon_switch,
++    .con_blank =      dummycon_blank,
++    .con_font_set =   dummycon_font_set,
++    .con_font_get =   dummycon_font_get,
++    .con_font_default =       dummycon_font_default,
++    .con_font_copy =  dummycon_font_copy,
+ };
+ EXPORT_SYMBOL_GPL(dummy_con);
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index b87f5cf..6aad4f8 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -106,7 +106,7 @@ static int fbcon_softback_size = 32768;
+ static unsigned long softback_buf, softback_curr;
+ static unsigned long softback_in;
+ static unsigned long softback_top, softback_end;
+-static int softback_lines;
++static long softback_lines;
+ /* console mappings */
+ static int first_fb_vc;
+ static int last_fb_vc = MAX_NR_CONSOLES - 1;
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 1157661..453a373 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1404,21 +1404,26 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
+  *  The console `switch' structure for the VGA based console
+  */
+-static int vgacon_dummy(struct vc_data *c)
++static void vgacon_clear(struct vc_data *vc, int a, int b, int c, int d)
+ {
+-      return 0;
+ }
+-#define DUMMY (void *) vgacon_dummy
++static void vgacon_putc(struct vc_data *vc, int a, int b, int c)
++{
++}
++
++static void vgacon_putcs(struct vc_data *vc, const unsigned short *a, int b, int c, int d)
++{
++}
+ const struct consw vga_con = {
+       .owner = THIS_MODULE,
+       .con_startup = vgacon_startup,
+       .con_init = vgacon_init,
+       .con_deinit = vgacon_deinit,
+-      .con_clear = DUMMY,
+-      .con_putc = DUMMY,
+-      .con_putcs = DUMMY,
++      .con_clear = vgacon_clear,
++      .con_putc = vgacon_putc,
++      .con_putcs = vgacon_putcs,
+       .con_cursor = vgacon_cursor,
+       .con_scroll = vgacon_scroll,
+       .con_switch = vgacon_switch,
+diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
+index 1b0b233..6f34c2c 100644
+--- a/drivers/video/fbdev/arcfb.c
++++ b/drivers/video/fbdev/arcfb.c
+@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
+               return -ENOSPC;
+       err = 0;
+-      if ((count + p) > fbmemlength) {
++      if (count > (fbmemlength - p)) {
+               count = fbmemlength - p;
+               err = -ENOSPC;
+       }
+diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
+index 0a46268..e55dcb5 100644
+--- a/drivers/video/fbdev/aty/aty128fb.c
++++ b/drivers/video/fbdev/aty/aty128fb.c
+@@ -144,7 +144,7 @@ enum {
+ };
+ /* Must match above enum */
+-static char * const r128_family[] = {
++static const char * const r128_family[] = {
+       "AGP",
+       "PCI",
+       "PRO AGP",
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index f34ed47f..7283c9f 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -1335,10 +1335,14 @@ static int atyfb_set_par(struct fb_info *info)
+       par->accel_flags = var->accel_flags; /* hack */
+       if (var->accel_flags) {
+-              info->fbops->fb_sync = atyfb_sync;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_sync) = atyfb_sync;
++              pax_close_kernel();
+               info->flags &= ~FBINFO_HWACCEL_DISABLED;
+       } else {
+-              info->fbops->fb_sync = NULL;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_sync) = NULL;
++              pax_close_kernel();
+               info->flags |= FBINFO_HWACCEL_DISABLED;
+       }
+diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
+index 51f29d6..2c15339 100644
+--- a/drivers/video/fbdev/aty/mach64_ct.c
++++ b/drivers/video/fbdev/aty/mach64_ct.c
+@@ -630,13 +630,14 @@ static void aty_resume_pll_ct(const struct fb_info *info,
+       aty_st_pll_ct(EXT_VPLL_CNTL, pll->ct.ext_vpll_cntl, par);
+ }
+-static int dummy(void)
++static int aty_set_dac(const struct fb_info * info,
++              const union aty_pll * pll, u32 bpp, u32 accel)
+ {
+       return 0;
+ }
+ const struct aty_dac_ops aty_dac_ct = {
+-      .set_dac        = (void *) dummy,
++      .set_dac        = aty_set_dac
+ };
+ const struct aty_pll_ops aty_pll_ct = {
+diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
+index 2fa0317..d687dab 100644
+--- a/drivers/video/fbdev/aty/mach64_cursor.c
++++ b/drivers/video/fbdev/aty/mach64_cursor.c
+@@ -8,6 +8,7 @@
+ #include "../core/fb_draw.h"
+ #include <asm/io.h>
++#include <asm/pgtable.h>
+ #ifdef __sparc__
+ #include <asm/fbio.h>
+@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
+       info->sprite.buf_align = 16;    /* and 64 lines tall. */
+       info->sprite.flags = FB_PIXMAP_IO;
+-      info->fbops->fb_cursor = atyfb_cursor;
++      pax_open_kernel();
++      const_cast(info->fbops->fb_cursor) = atyfb_cursor;
++      pax_close_kernel();
+       return 0;
+ }
+diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
+index 10c988a..f7d9299 100644
+--- a/drivers/video/fbdev/aty/mach64_gx.c
++++ b/drivers/video/fbdev/aty/mach64_gx.c
+@@ -894,17 +894,26 @@ static int aty_set_dac_unsupported(const struct fb_info *info,
+       return 0;
+ }
+-static int dummy(void)
++static int aty_var_to_pll(const struct fb_info * info, u32 vclk_per, u32 bpp, union aty_pll * pll)
+ {
+       return 0;
+ }
++static u32 aty_pll_to_var(const struct fb_info * info, const union aty_pll * pll)
++{
++      return 0;
++}
++
++static void aty_set_pll(const struct fb_info * info, const union aty_pll * pll)
++{
++}
++
+ const struct aty_dac_ops aty_dac_unsupported = {
+       .set_dac        = aty_set_dac_unsupported,
+ };
+ const struct aty_pll_ops aty_pll_unsupported = {
+-      .var_to_pll     = (void *) dummy,
+-      .pll_to_var     = (void *) dummy,
+-      .set_pll        = (void *) dummy,
++      .var_to_pll     = aty_var_to_pll,
++      .pll_to_var     = aty_pll_to_var,
++      .set_pll        = aty_set_pll,
+ };
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index 74b5bca..5bddbea 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -208,7 +208,9 @@ void fb_deferred_io_init(struct fb_info *info)
+       BUG_ON(!fbdefio);
+       mutex_init(&fbdefio->lock);
+-      info->fbops->fb_mmap = fb_deferred_io_mmap;
++      pax_open_kernel();
++      const_cast(info->fbops->fb_mmap) = fb_deferred_io_mmap;
++      pax_close_kernel();
+       INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
+       INIT_LIST_HEAD(&fbdefio->pagelist);
+       if (fbdefio->delay == 0) /* set a default of 1 s */
+@@ -239,7 +241,9 @@ void fb_deferred_io_cleanup(struct fb_info *info)
+               page->mapping = NULL;
+       }
+-      info->fbops->fb_mmap = NULL;
++      pax_open_kernel();
++      const_cast(info->fbops->fb_mmap) = NULL;
++      pax_close_kernel();
+       mutex_destroy(&fbdefio->lock);
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 76c1ad9..6ec5e94 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
+       __u32 data;
+       int err;
+-      err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
++      err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
+       data = (__u32) (unsigned long) fix->smem_start;
+       err |= put_user(data, &fix32->smem_start);
+@@ -1435,10 +1435,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+       return vm_iomap_memory(vma, start, len);
+ }
+-static int
+-fb_open(struct inode *inode, struct file *file)
+-__acquires(&info->lock)
+-__releases(&info->lock)
++static int fb_open(struct inode *inode, struct file *file)
+ {
+       int fbidx = iminor(inode);
+       struct fb_info *info;
+@@ -1476,10 +1473,7 @@ out:
+       return res;
+ }
+-static int 
+-fb_release(struct inode *inode, struct file *file)
+-__acquires(&info->lock)
+-__releases(&info->lock)
++static int fb_release(struct inode *inode, struct file *file)
+ {
+       struct fb_info * const info = file->private_data;
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 2fd49b2..67e3d86 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -240,7 +240,7 @@ static uint screen_fb_size;
+ static inline int synthvid_send(struct hv_device *hdev,
+                               struct synthvid_msg *msg)
+ {
+-      static atomic64_t request_id = ATOMIC64_INIT(0);
++      static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
+       int ret;
+       msg->pipe_hdr.type = PIPE_MSG_DATA;
+@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
+       ret = vmbus_sendpacket(hdev->channel, msg,
+                              msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
+-                             atomic64_inc_return(&request_id),
++                             atomic64_inc_return_unchecked(&request_id),
+                              VM_PKT_DATA_INBAND, 0);
+       if (ret)
+diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
+index 7672d2e..b56437f 100644
+--- a/drivers/video/fbdev/i810/i810_accel.c
++++ b/drivers/video/fbdev/i810/i810_accel.c
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
+               }
+       }
+       printk("ringbuffer lockup!!!\n");
++      printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+       i810_report_error(mmio); 
+       par->dev_flags |= LOCKUP;
+       info->pixmap.scan_align = 1;
+diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
+index a01147f..5d896f8 100644
+--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
++++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
+@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
+ #ifdef CONFIG_FB_MATROX_MYSTIQUE
+ struct matrox_switch matrox_mystique = {
+-      MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
++      .preinit = MGA1064_preinit,
++      .reset = MGA1064_reset,
++      .init = MGA1064_init,
++      .restore = MGA1064_restore,
+ };
+ EXPORT_SYMBOL(matrox_mystique);
+ #endif
+ #ifdef CONFIG_FB_MATROX_G
+ struct matrox_switch matrox_G100 = {
+-      MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
++      .preinit = MGAG100_preinit,
++      .reset = MGAG100_reset,
++      .init = MGAG100_init,
++      .restore = MGAG100_restore,
+ };
+ EXPORT_SYMBOL(matrox_G100);
+ #endif
+diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
+index 195ad7c..09743fc 100644
+--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
++++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
+@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
+ }
+ struct matrox_switch matrox_millennium = {
+-      Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
++      .preinit = Ti3026_preinit,
++      .reset = Ti3026_reset,
++      .init = Ti3026_init,
++      .restore = Ti3026_restore
+ };
+ EXPORT_SYMBOL(matrox_millennium);
+ #endif
+diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
+index 11eb094..622ee31 100644
+--- a/drivers/video/fbdev/matrox/matroxfb_base.c
++++ b/drivers/video/fbdev/matrox/matroxfb_base.c
+@@ -2176,7 +2176,7 @@ static struct pci_driver matroxfb_driver = {
+ #define RS1056x480    14      /* 132 x 60 text */
+ #define RSNoxNo               15
+ /* 10-FF */
+-static struct { int xres, yres, left, right, upper, lower, hslen, vslen, vfreq; } timmings[] __initdata = {
++static struct { unsigned int xres, yres, left, right, upper, lower, hslen, vslen, vfreq; } timmings[] __initdata = {
+       {  640,  400,  48, 16, 39,  8,  96, 2, 70 },
+       {  640,  480,  48, 16, 33, 10,  96, 2, 60 },
+       {  800,  600, 144, 24, 28,  8, 112, 6, 60 },
+diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+index fe92eed..239e386 100644
+--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
++++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
+       struct mb862xxfb_par *par = info->par;
+       if (info->var.bits_per_pixel == 32) {
+-              info->fbops->fb_fillrect = cfb_fillrect;
+-              info->fbops->fb_copyarea = cfb_copyarea;
+-              info->fbops->fb_imageblit = cfb_imageblit;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_fillrect) = cfb_fillrect;
++              const_cast(info->fbops->fb_copyarea) = cfb_copyarea;
++              const_cast(info->fbops->fb_imageblit) = cfb_imageblit;
++              pax_close_kernel();
+       } else {
+               outreg(disp, GC_L0EM, 3);
+-              info->fbops->fb_fillrect = mb86290fb_fillrect;
+-              info->fbops->fb_copyarea = mb86290fb_copyarea;
+-              info->fbops->fb_imageblit = mb86290fb_imageblit;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_fillrect) = mb86290fb_fillrect;
++              const_cast(info->fbops->fb_copyarea) = mb86290fb_copyarea;
++              const_cast(info->fbops->fb_imageblit) = mb86290fb_imageblit;
++              pax_close_kernel();
+       }
+       outreg(draw, GDC_REG_DRAW_BASE, 0);
+       outreg(draw, GDC_REG_MODE_MISC, 0x8000);
+diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
+index ce7dab7..89d6521 100644
+--- a/drivers/video/fbdev/nvidia/nvidia.c
++++ b/drivers/video/fbdev/nvidia/nvidia.c
+@@ -660,19 +660,23 @@ static int nvidiafb_set_par(struct fb_info *info)
+       info->fix.line_length = (info->var.xres_virtual *
+                                info->var.bits_per_pixel) >> 3;
+       if (info->var.accel_flags) {
+-              info->fbops->fb_imageblit = nvidiafb_imageblit;
+-              info->fbops->fb_fillrect = nvidiafb_fillrect;
+-              info->fbops->fb_copyarea = nvidiafb_copyarea;
+-              info->fbops->fb_sync = nvidiafb_sync;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_imageblit) = nvidiafb_imageblit;
++              const_cast(info->fbops->fb_fillrect) = nvidiafb_fillrect;
++              const_cast(info->fbops->fb_copyarea) = nvidiafb_copyarea;
++              const_cast(info->fbops->fb_sync) = nvidiafb_sync;
++              pax_close_kernel();
+               info->pixmap.scan_align = 4;
+               info->flags &= ~FBINFO_HWACCEL_DISABLED;
+               info->flags |= FBINFO_READS_FAST;
+               NVResetGraphics(info);
+       } else {
+-              info->fbops->fb_imageblit = cfb_imageblit;
+-              info->fbops->fb_fillrect = cfb_fillrect;
+-              info->fbops->fb_copyarea = cfb_copyarea;
+-              info->fbops->fb_sync = NULL;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_imageblit) = cfb_imageblit;
++              const_cast(info->fbops->fb_fillrect) = cfb_fillrect;
++              const_cast(info->fbops->fb_copyarea) = cfb_copyarea;
++              const_cast(info->fbops->fb_sync) = NULL;
++              pax_close_kernel();
+               info->pixmap.scan_align = 1;
+               info->flags |= FBINFO_HWACCEL_DISABLED;
+               info->flags &= ~FBINFO_READS_FAST;
+@@ -1164,8 +1168,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
+       info->pixmap.size = 8 * 1024;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-      if (!hwcur)
+-          info->fbops->fb_cursor = NULL;
++      if (!hwcur) {
++              pax_open_kernel();
++              const_cast(info->fbops->fb_cursor) = NULL;
++              pax_close_kernel();
++      }
+       info->var.accel_flags = (!noaccel);
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
+index dd54686..6ef7ef6 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
+@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
+       if (dssdev->name == NULL)
+               dssdev->name = dssdev->alias;
++      pax_open_kernel();
+       if (drv && drv->get_resolution == NULL)
+-              drv->get_resolution = omapdss_default_get_resolution;
++              const_cast(drv->get_resolution) = omapdss_default_get_resolution;
+       if (drv && drv->get_recommended_bpp == NULL)
+-              drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
++              const_cast(drv->get_recommended_bpp) = omapdss_default_get_recommended_bpp;
+       if (drv && drv->get_timings == NULL)
+-              drv->get_timings = omapdss_default_get_timings;
++              const_cast(drv->get_timings) = omapdss_default_get_timings;
++      pax_close_kernel();
+       mutex_lock(&panel_list_mutex);
+       list_add_tail(&dssdev->panel_list, &panel_list);
+diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
+index 96aa46d..65e2554 100644
+--- a/drivers/video/fbdev/s1d13xxxfb.c
++++ b/drivers/video/fbdev/s1d13xxxfb.c
+@@ -880,8 +880,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
+       switch(prod_id) {
+       case S1D13506_PROD_ID:  /* activate acceleration */
+-              s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+-              s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++              pax_open_kernel();
++              const_cast(s1d13xxxfb_fbops.fb_fillrect) = s1d13xxxfb_bitblt_solidfill;
++              const_cast(s1d13xxxfb_fbops.fb_copyarea) = s1d13xxxfb_bitblt_copyarea;
++              pax_close_kernel();
+               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+                       FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
+               break;
+diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+index 82c0a8c..42499a1 100644
+--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
++++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
+ }
+ static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
+-      lcdc_sys_write_index,
+-      lcdc_sys_write_data,
+-      lcdc_sys_read_data,
++      .write_index = lcdc_sys_write_index,
++      .write_data = lcdc_sys_write_data,
++      .read_data = lcdc_sys_read_data,
+ };
+ static int sh_mobile_lcdc_sginit(struct fb_info *info,
+diff --git a/drivers/video/fbdev/sis/sis_main.h b/drivers/video/fbdev/sis/sis_main.h
+index 32e23c2..7b73082 100644
+--- a/drivers/video/fbdev/sis/sis_main.h
++++ b/drivers/video/fbdev/sis/sis_main.h
+@@ -763,7 +763,7 @@ extern void                SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsig
+ extern unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg);
+ extern void           SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val);
+ extern void           SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg,
+-                              unsigned char myor, unsigned char myand);
++                              unsigned char myor, unsigned short myand);
+ extern void           SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime);
+ extern void           SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo);
+ extern unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine,
+diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
+index 9279e5f..d9fb0bd 100644
+--- a/drivers/video/fbdev/smscufx.c
++++ b/drivers/video/fbdev/smscufx.c
+@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
+               fb_deferred_io_cleanup(info);
+               kfree(info->fbdefio);
+               info->fbdefio = NULL;
+-              info->fbops->fb_mmap = ufx_ops_mmap;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_mmap) = ufx_ops_mmap;
++              pax_close_kernel();
+       }
+       pr_debug("released /dev/fb%d user=%d count=%d",
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index e9c2f7b..87506f4 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
+               dlfb_urb_completion(urb);
+ error:
+-      atomic_add(bytes_sent, &dev->bytes_sent);
+-      atomic_add(bytes_identical, &dev->bytes_identical);
+-      atomic_add(width*height*2, &dev->bytes_rendered);
++      atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++      atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++      atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
+       end_cycles = get_cycles();
+-      atomic_add(((unsigned int) ((end_cycles - start_cycles)
++      atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+                   >> 10)), /* Kcycles */
+                  &dev->cpu_kcycles_used);
+@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
+               dlfb_urb_completion(urb);
+ error:
+-      atomic_add(bytes_sent, &dev->bytes_sent);
+-      atomic_add(bytes_identical, &dev->bytes_identical);
+-      atomic_add(bytes_rendered, &dev->bytes_rendered);
++      atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++      atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++      atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
+       end_cycles = get_cycles();
+-      atomic_add(((unsigned int) ((end_cycles - start_cycles)
++      atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+                   >> 10)), /* Kcycles */
+                  &dev->cpu_kcycles_used);
+ }
+@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
+               fb_deferred_io_cleanup(info);
+               kfree(info->fbdefio);
+               info->fbdefio = NULL;
+-              info->fbops->fb_mmap = dlfb_ops_mmap;
++              pax_open_kernel();
++              const_cast(info->fbops->fb_mmap) = dlfb_ops_mmap;
++              pax_close_kernel();
+       }
+       pr_warn("released /dev/fb%d user=%d count=%d\n",
+@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->bytes_rendered));
++                      atomic_read_unchecked(&dev->bytes_rendered));
+ }
+ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->bytes_identical));
++                      atomic_read_unchecked(&dev->bytes_identical));
+ }
+ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->bytes_sent));
++                      atomic_read_unchecked(&dev->bytes_sent));
+ }
+ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+       return snprintf(buf, PAGE_SIZE, "%u\n",
+-                      atomic_read(&dev->cpu_kcycles_used));
++                      atomic_read_unchecked(&dev->cpu_kcycles_used));
+ }
+ static ssize_t edid_show(
+@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
+       struct fb_info *fb_info = dev_get_drvdata(fbdev);
+       struct dlfb_data *dev = fb_info->par;
+-      atomic_set(&dev->bytes_rendered, 0);
+-      atomic_set(&dev->bytes_identical, 0);
+-      atomic_set(&dev->bytes_sent, 0);
+-      atomic_set(&dev->cpu_kcycles_used, 0);
++      atomic_set_unchecked(&dev->bytes_rendered, 0);
++      atomic_set_unchecked(&dev->bytes_identical, 0);
++      atomic_set_unchecked(&dev->bytes_sent, 0);
++      atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
+       return count;
+ }
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 178ae93..043ddca 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
+       if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+               par->pmi_setpal = par->ypan = 0;
+       } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++              par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++              if (!par->pmi_code) {
++                      par->pmi_setpal = par->ypan = 0;
++                      return 0;
++              }
++#endif
++
+               par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+                                               + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              pax_open_kernel();
++              memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++              pax_close_kernel();
++
++              par->pmi_start = (void *)ktva_ktla((unsigned long)(par->pmi_code + par->pmi_base[1]));
++              par->pmi_pal = (void *)ktva_ktla((unsigned long)(par->pmi_code + par->pmi_base[2]));
++#else
+               par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+               par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+               printk(KERN_INFO "uvesafb: protected mode interface info at "
+                                "%04x:%04x\n",
+                                (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
+       par->ypan = ypan;
+       if (par->pmi_setpal || par->ypan) {
++#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
+               if (__supported_pte_mask & _PAGE_NX) {
+                       par->pmi_setpal = par->ypan = 0;
+                       printk(KERN_WARNING "uvesafb: NX protection is active, "
+                                           "better not use the PMI.\n");
+-              } else {
++              } else
++#endif
+                       uvesafb_vbe_getpmi(task, par);
+-              }
+       }
+ #else
+       /* The protected mode interface is not available on non-x86. */
+@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
+       info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
+       /* Disable blanking if the user requested so. */
+-      if (!blank)
+-              info->fbops->fb_blank = NULL;
++      if (!blank) {
++              pax_open_kernel();
++              const_cast(info->fbops->fb_blank) = NULL;
++              pax_close_kernel();
++      }
+       /*
+        * Find out how much IO memory is required for the mode with
+@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
+       info->flags = FBINFO_FLAG_DEFAULT |
+                       (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
+-      if (!par->ypan)
+-              info->fbops->fb_pan_display = NULL;
++      if (!par->ypan) {
++              pax_open_kernel();
++              const_cast(info->fbops->fb_pan_display) = NULL;
++              pax_close_kernel();
++      }
+ }
+ static void uvesafb_init_mtrr(struct fb_info *info)
+@@ -1786,6 +1816,11 @@ out_mode:
+ out:
+       kfree(par->vbe_modes);
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++      if (par->pmi_code)
++              module_memfree_exec(par->pmi_code);
++#endif
++
+       framebuffer_release(info);
+       return err;
+ }
+@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
+               kfree(par->vbe_state_orig);
+               kfree(par->vbe_state_saved);
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              if (par->pmi_code)
++                      module_memfree_exec(par->pmi_code);
++#endif
++
+               framebuffer_release(info);
+       }
+       return 0;
+diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
+index 528fe91..475d9e6 100644
+--- a/drivers/video/fbdev/vesafb.c
++++ b/drivers/video/fbdev/vesafb.c
+@@ -9,6 +9,7 @@
+  */
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -56,8 +57,8 @@ static int   vram_remap;                     /* Set amount of memory to be used */
+ static int   vram_total;                      /* Set total amount of memory */
+ static int   pmi_setpal __read_mostly = 1;    /* pmi for palette changes ??? */
+ static int   ypan       __read_mostly;                /* 0..nothing, 1..ypan, 2..ywrap */
+-static void  (*pmi_start)(void) __read_mostly;
+-static void  (*pmi_pal)  (void) __read_mostly;
++static void  (*pmi_start)(void) __read_only;
++static void  (*pmi_pal)  (void) __read_only;
+ static int   depth      __read_mostly;
+ static int   vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -241,6 +242,7 @@ static int vesafb_probe(struct platform_device *dev)
+       unsigned int size_remap;
+       unsigned int size_total;
+       char *option = NULL;
++      void *pmi_code = NULL;
+       /* ignore error return of fb_get_options */
+       fb_get_options("vesafb", &option);
+@@ -287,10 +289,6 @@ static int vesafb_probe(struct platform_device *dev)
+               size_remap = size_total;
+       vesafb_fix.smem_len = size_remap;
+-#ifndef __i386__
+-      screen_info.vesapm_seg = 0;
+-#endif
+-
+       if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+               printk(KERN_WARNING
+                      "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -320,9 +318,21 @@ static int vesafb_probe(struct platform_device *dev)
+       printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+              vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++      pmi_code = module_alloc_exec(screen_info.vesapm_size);
++      if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++      if (0)
++#endif
++
++#endif
++      screen_info.vesapm_seg = 0;
++
+       if (screen_info.vesapm_seg) {
+-              printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+-                     screen_info.vesapm_seg,screen_info.vesapm_off);
++              printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++                     screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+       }
+       if (screen_info.vesapm_seg < 0xc000)
+@@ -330,9 +340,25 @@ static int vesafb_probe(struct platform_device *dev)
+       if (ypan || pmi_setpal) {
+               unsigned short *pmi_base;
++
+               pmi_base  = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+-              pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+-              pmi_pal   = (void*)((char*)pmi_base + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              pax_open_kernel();
++              memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++              pmi_code  = pmi_base;
++#endif
++
++              pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++              pmi_pal   = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++              pmi_start = (void *)ktva_ktla((unsigned long)pmi_start);
++              pmi_pal = (void *)ktva_ktla((unsigned long)pmi_pal);
++              pax_close_kernel();
++#endif
++
+               printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+               if (pmi_base[3]) {
+                       printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -452,8 +478,11 @@ static int vesafb_probe(struct platform_device *dev)
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
+               (ypan ? FBINFO_HWACCEL_YPAN : 0);
+-      if (!ypan)
+-              info->fbops->fb_pan_display = NULL;
++      if (!ypan) {
++              pax_open_kernel();
++              const_cast(info->fbops->fb_pan_display) = NULL;
++              pax_close_kernel();
++      }
+       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+               err = -ENOMEM;
+@@ -467,6 +496,11 @@ static int vesafb_probe(struct platform_device *dev)
+       fb_info(info, "%s frame buffer device\n", info->fix.id);
+       return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++      module_memfree_exec(pmi_code);
++#endif
++
+       arch_phys_wc_del(par->wc_cookie);
+       if (info->screen_base)
+               iounmap(info->screen_base);
+diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
+index 88714ae..16c2e11 100644
+--- a/drivers/video/fbdev/via/via_clock.h
++++ b/drivers/video/fbdev/via/via_clock.h
+@@ -56,7 +56,7 @@ struct via_clock {
+       void (*set_engine_pll_state)(u8 state);
+       void (*set_engine_pll)(struct via_pll_config config);
+-};
++} __no_const;
+ static inline u32 get_pll_internal_frequency(u32 ref_freq,
+diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
+index 3c14e43..2630570 100644
+--- a/drivers/video/logo/logo_linux_clut224.ppm
++++ b/drivers/video/logo/logo_linux_clut224.ppm
+@@ -2,1603 +2,1123 @@ P3
+ # Standard 224-color Linux logo
+ 80 80
+ 255
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6   6   6   6  10  10  10  10  10  10
+- 10  10  10   6   6   6   6   6   6   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  10  10  10  14  14  14
+- 22  22  22  26  26  26  30  30  30  34  34  34
+- 30  30  30  30  30  30  26  26  26  18  18  18
+- 14  14  14  10  10  10   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  26  26  26  42  42  42
+- 54  54  54  66  66  66  78  78  78  78  78  78
+- 78  78  78  74  74  74  66  66  66  54  54  54
+- 42  42  42  26  26  26  18  18  18  10  10  10
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 22  22  22  42  42  42  66  66  66  86  86  86
+- 66  66  66  38  38  38  38  38  38  22  22  22
+- 26  26  26  34  34  34  54  54  54  66  66  66
+- 86  86  86  70  70  70  46  46  46  26  26  26
+- 14  14  14   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0  10  10  10  26  26  26
+- 50  50  50  82  82  82  58  58  58   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  6   6   6  54  54  54  86  86  86  66  66  66
+- 38  38  38  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  22  22  22  50  50  50
+- 78  78  78  34  34  34   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   6   6   6  70  70  70
+- 78  78  78  46  46  46  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  18  18  18  42  42  42  82  82  82
+- 26  26  26   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  14  14  14
+- 46  46  46  34  34  34   6   6   6   2   2   6
+- 42  42  42  78  78  78  42  42  42  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   0   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 10  10  10  30  30  30  66  66  66  58  58  58
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  26  26  26
+- 86  86  86 101 101 101  46  46  46  10  10  10
+-  2   2   6  58  58  58  70  70  70  34  34  34
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 14  14  14  42  42  42  86  86  86  10  10  10
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  30  30  30
+- 94  94  94  94  94  94  58  58  58  26  26  26
+-  2   2   6   6   6   6  78  78  78  54  54  54
+- 22  22  22   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 22  22  22  62  62  62  62  62  62   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  26  26  26
+- 54  54  54  38  38  38  18  18  18  10  10  10
+-  2   2   6   2   2   6  34  34  34  82  82  82
+- 38  38  38  14  14  14   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 30  30  30  78  78  78  30  30  30   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  10  10  10
+- 10  10  10   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  78  78  78
+- 50  50  50  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 38  38  38  86  86  86  14  14  14   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  54  54  54
+- 66  66  66  26  26  26   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 42  42  42  82  82  82   2   2   6   2   2   6
+-  2   2   6   6   6   6  10  10  10   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   6   6   6
+- 14  14  14  10  10  10   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  18  18  18
+- 82  82  82  34  34  34  10  10  10   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 46  46  46  86  86  86   2   2   6   2   2   6
+-  6   6   6   6   6   6  22  22  22  34  34  34
+-  6   6   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  18  18  18  34  34  34
+- 10  10  10  50  50  50  22  22  22   2   2   6
+-  2   2   6   2   2   6   2   2   6  10  10  10
+- 86  86  86  42  42  42  14  14  14   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   1   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 46  46  46  86  86  86   2   2   6   2   2   6
+- 38  38  38 116 116 116  94  94  94  22  22  22
+- 22  22  22   2   2   6   2   2   6   2   2   6
+- 14  14  14  86  86  86 138 138 138 162 162 162
+-154 154 154  38  38  38  26  26  26   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 86  86  86  46  46  46  14  14  14   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 46  46  46  86  86  86   2   2   6  14  14  14
+-134 134 134 198 198 198 195 195 195 116 116 116
+- 10  10  10   2   2   6   2   2   6   6   6   6
+-101  98  89 187 187 187 210 210 210 218 218 218
+-214 214 214 134 134 134  14  14  14   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 86  86  86  50  50  50  18  18  18   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   1   0   0   0
+-  0   0   1   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 46  46  46  86  86  86   2   2   6  54  54  54
+-218 218 218 195 195 195 226 226 226 246 246 246
+- 58  58  58   2   2   6   2   2   6  30  30  30
+-210 210 210 253 253 253 174 174 174 123 123 123
+-221 221 221 234 234 234  74  74  74   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 70  70  70  58  58  58  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 46  46  46  82  82  82   2   2   6 106 106 106
+-170 170 170  26  26  26  86  86  86 226 226 226
+-123 123 123  10  10  10  14  14  14  46  46  46
+-231 231 231 190 190 190   6   6   6  70  70  70
+- 90  90  90 238 238 238 158 158 158   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 70  70  70  58  58  58  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   1   0   0   0
+-  0   0   1   0   0   1   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 42  42  42  86  86  86   6   6   6 116 116 116
+-106 106 106   6   6   6  70  70  70 149 149 149
+-128 128 128  18  18  18  38  38  38  54  54  54
+-221 221 221 106 106 106   2   2   6  14  14  14
+- 46  46  46 190 190 190 198 198 198   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 74  74  74  62  62  62  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   1   0   0   0
+-  0   0   1   0   0   0   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 42  42  42  94  94  94  14  14  14 101 101 101
+-128 128 128   2   2   6  18  18  18 116 116 116
+-118  98  46 121  92   8 121  92   8  98  78  10
+-162 162 162 106 106 106   2   2   6   2   2   6
+-  2   2   6 195 195 195 195 195 195   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 74  74  74  62  62  62  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   1   0   0   1
+-  0   0   1   0   0   0   0   0   1   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 38  38  38  90  90  90  14  14  14  58  58  58
+-210 210 210  26  26  26  54  38   6 154 114  10
+-226 170  11 236 186  11 225 175  15 184 144  12
+-215 174  15 175 146  61  37  26   9   2   2   6
+- 70  70  70 246 246 246 138 138 138   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 70  70  70  66  66  66  26  26  26   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 38  38  38  86  86  86  14  14  14  10  10  10
+-195 195 195 188 164 115 192 133   9 225 175  15
+-239 182  13 234 190  10 232 195  16 232 200  30
+-245 207  45 241 208  19 232 195  16 184 144  12
+-218 194 134 211 206 186  42  42  42   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 50  50  50  74  74  74  30  30  30   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 34  34  34  86  86  86  14  14  14   2   2   6
+-121  87  25 192 133   9 219 162  10 239 182  13
+-236 186  11 232 195  16 241 208  19 244 214  54
+-246 218  60 246 218  38 246 215  20 241 208  19
+-241 208  19 226 184  13 121  87  25   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 50  50  50  82  82  82  34  34  34  10  10  10
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 34  34  34  82  82  82  30  30  30  61  42   6
+-180 123   7 206 145  10 230 174  11 239 182  13
+-234 190  10 238 202  15 241 208  19 246 218  74
+-246 218  38 246 215  20 246 215  20 246 215  20
+-226 184  13 215 174  15 184 144  12   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 26  26  26  94  94  94  42  42  42  14  14  14
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  78  78  78  50  50  50 104  69   6
+-192 133   9 216 158  10 236 178  12 236 186  11
+-232 195  16 241 208  19 244 214  54 245 215  43
+-246 215  20 246 215  20 241 208  19 198 155  10
+-200 144  11 216 158  10 156 118  10   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  6   6   6  90  90  90  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  78  78  78  46  46  46  22  22  22
+-137  92   6 210 162  10 239 182  13 238 190  10
+-238 202  15 241 208  19 246 215  20 246 215  20
+-241 208  19 203 166  17 185 133  11 210 150  10
+-216 158  10 210 150  10 102  78  10   2   2   6
+-  6   6   6  54  54  54  14  14  14   2   2   6
+-  2   2   6  62  62  62  74  74  74  30  30  30
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 34  34  34  78  78  78  50  50  50   6   6   6
+- 94  70  30 139 102  15 190 146  13 226 184  13
+-232 200  30 232 195  16 215 174  15 190 146  13
+-168 122  10 192 133   9 210 150  10 213 154  11
+-202 150  34 182 157 106 101  98  89   2   2   6
+-  2   2   6  78  78  78 116 116 116  58  58  58
+-  2   2   6  22  22  22  90  90  90  46  46  46
+- 18  18  18   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 38  38  38  86  86  86  50  50  50   6   6   6
+-128 128 128 174 154 114 156 107  11 168 122  10
+-198 155  10 184 144  12 197 138  11 200 144  11
+-206 145  10 206 145  10 197 138  11 188 164 115
+-195 195 195 198 198 198 174 174 174  14  14  14
+-  2   2   6  22  22  22 116 116 116 116 116 116
+- 22  22  22   2   2   6  74  74  74  70  70  70
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50 101 101 101  26  26  26  10  10  10
+-138 138 138 190 190 190 174 154 114 156 107  11
+-197 138  11 200 144  11 197 138  11 192 133   9
+-180 123   7 190 142  34 190 178 144 187 187 187
+-202 202 202 221 221 221 214 214 214  66  66  66
+-  2   2   6   2   2   6  50  50  50  62  62  62
+-  6   6   6   2   2   6  10  10  10  90  90  90
+- 50  50  50  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0  10  10  10  34  34  34
+- 74  74  74  74  74  74   2   2   6   6   6   6
+-144 144 144 198 198 198 190 190 190 178 166 146
+-154 121  60 156 107  11 156 107  11 168 124  44
+-174 154 114 187 187 187 190 190 190 210 210 210
+-246 246 246 253 253 253 253 253 253 182 182 182
+-  6   6   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  62  62  62
+- 74  74  74  34  34  34  14  14  14   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0  10  10  10  22  22  22  54  54  54
+- 94  94  94  18  18  18   2   2   6  46  46  46
+-234 234 234 221 221 221 190 190 190 190 190 190
+-190 190 190 187 187 187 187 187 187 190 190 190
+-190 190 190 195 195 195 214 214 214 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+- 82  82  82   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  14  14  14
+- 86  86  86  54  54  54  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  18  18  18  46  46  46  90  90  90
+- 46  46  46  18  18  18   6   6   6 182 182 182
+-253 253 253 246 246 246 206 206 206 190 190 190
+-190 190 190 190 190 190 190 190 190 190 190 190
+-206 206 206 231 231 231 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-202 202 202  14  14  14   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 42  42  42  86  86  86  42  42  42  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 14  14  14  38  38  38  74  74  74  66  66  66
+-  2   2   6   6   6   6  90  90  90 250 250 250
+-253 253 253 253 253 253 238 238 238 198 198 198
+-190 190 190 190 190 190 195 195 195 221 221 221
+-246 246 246 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253  82  82  82   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  78  78  78  70  70  70  34  34  34
+- 14  14  14   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 34  34  34  66  66  66  78  78  78   6   6   6
+-  2   2   6  18  18  18 218 218 218 253 253 253
+-253 253 253 253 253 253 253 253 253 246 246 246
+-226 226 226 231 231 231 246 246 246 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 178 178 178   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  18  18  18  90  90  90  62  62  62
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0  10  10  10  26  26  26
+- 58  58  58  90  90  90  18  18  18   2   2   6
+-  2   2   6 110 110 110 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 231 231 231  18  18  18   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  18  18  18  94  94  94
+- 54  54  54  26  26  26  10  10  10   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  22  22  22  50  50  50
+- 90  90  90  26  26  26   2   2   6   2   2   6
+- 14  14  14 195 195 195 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 242 242 242  54  54  54   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6  38  38  38
+- 86  86  86  50  50  50  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  38  38  38  82  82  82
+- 34  34  34   2   2   6   2   2   6   2   2   6
+- 42  42  42 195 195 195 246 246 246 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 242 242 242 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 246 246 246 238 238 238
+-226 226 226 231 231 231 101 101 101   6   6   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 38  38  38  82  82  82  42  42  42  14  14  14
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 10  10  10  26  26  26  62  62  62  66  66  66
+-  2   2   6   2   2   6   2   2   6   6   6   6
+- 70  70  70 170 170 170 206 206 206 234 234 234
+-246 246 246 250 250 250 250 250 250 238 238 238
+-226 226 226 231 231 231 238 238 238 250 250 250
+-250 250 250 250 250 250 246 246 246 231 231 231
+-214 214 214 206 206 206 202 202 202 202 202 202
+-198 198 198 202 202 202 182 182 182  18  18  18
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  62  62  62  66  66  66  30  30  30
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 14  14  14  42  42  42  82  82  82  18  18  18
+-  2   2   6   2   2   6   2   2   6  10  10  10
+- 94  94  94 182 182 182 218 218 218 242 242 242
+-250 250 250 253 253 253 253 253 253 250 250 250
+-234 234 234 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 246 246 246
+-238 238 238 226 226 226 210 210 210 202 202 202
+-195 195 195 195 195 195 210 210 210 158 158 158
+-  6   6   6  14  14  14  50  50  50  14  14  14
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  86  86  86  46  46  46
+- 18  18  18   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 22  22  22  54  54  54  70  70  70   2   2   6
+-  2   2   6  10  10  10   2   2   6  22  22  22
+-166 166 166 231 231 231 250 250 250 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 246 246 246
+-231 231 231 206 206 206 198 198 198 226 226 226
+- 94  94  94   2   2   6   6   6   6  38  38  38
+- 30  30  30   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  62  62  62  66  66  66
+- 26  26  26  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  74  74  74  50  50  50   2   2   6
+- 26  26  26  26  26  26   2   2   6 106 106 106
+-238 238 238 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 246 246 246 218 218 218 202 202 202
+-210 210 210  14  14  14   2   2   6   2   2   6
+- 30  30  30  22  22  22   2   2   6   2   2   6
+-  2   2   6   2   2   6  18  18  18  86  86  86
+- 42  42  42  14  14  14   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 42  42  42  90  90  90  22  22  22   2   2   6
+- 42  42  42   2   2   6  18  18  18 218 218 218
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 250 250 250 221 221 221
+-218 218 218 101 101 101   2   2   6  14  14  14
+- 18  18  18  38  38  38  10  10  10   2   2   6
+-  2   2   6   2   2   6   2   2   6  78  78  78
+- 58  58  58  22  22  22   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 54  54  54  82  82  82   2   2   6  26  26  26
+- 22  22  22   2   2   6 123 123 123 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-238 238 238 198 198 198   6   6   6  38  38  38
+- 58  58  58  26  26  26  38  38  38   2   2   6
+-  2   2   6   2   2   6   2   2   6  46  46  46
+- 78  78  78  30  30  30  10  10  10   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0  10  10  10  30  30  30
+- 74  74  74  58  58  58   2   2   6  42  42  42
+-  2   2   6  22  22  22 231 231 231 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 250 250 250
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 246 246 246  46  46  46  38  38  38
+- 42  42  42  14  14  14  38  38  38  14  14  14
+-  2   2   6   2   2   6   2   2   6   6   6   6
+- 86  86  86  46  46  46  14  14  14   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  14  14  14  42  42  42
+- 90  90  90  18  18  18  18  18  18  26  26  26
+-  2   2   6 116 116 116 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 250 250 250 238 238 238
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253  94  94  94   6   6   6
+-  2   2   6   2   2   6  10  10  10  34  34  34
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 74  74  74  58  58  58  22  22  22   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0  10  10  10  26  26  26  66  66  66
+- 82  82  82   2   2   6  38  38  38   6   6   6
+- 14  14  14 210 210 210 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 246 246 246 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 144 144 144   2   2   6
+-  2   2   6   2   2   6   2   2   6  46  46  46
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 42  42  42  74  74  74  30  30  30  10  10  10
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  42  42  42  90  90  90
+- 26  26  26   6   6   6  42  42  42   2   2   6
+- 74  74  74 250 250 250 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 242 242 242 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 182 182 182   2   2   6
+-  2   2   6   2   2   6   2   2   6  46  46  46
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 10  10  10  86  86  86  38  38  38  10  10  10
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 10  10  10  26  26  26  66  66  66  82  82  82
+-  2   2   6  22  22  22  18  18  18   2   2   6
+-149 149 149 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 206 206 206   2   2   6
+-  2   2   6   2   2   6   2   2   6  38  38  38
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  6   6   6  86  86  86  46  46  46  14  14  14
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 18  18  18  46  46  46  86  86  86  18  18  18
+-  2   2   6  34  34  34  10  10  10   6   6   6
+-210 210 210 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 221 221 221   6   6   6
+-  2   2   6   2   2   6   6   6   6  30  30  30
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  82  82  82  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 26  26  26  66  66  66  62  62  62   2   2   6
+-  2   2   6  38  38  38  10  10  10  26  26  26
+-238 238 238 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 238 238 238
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231   6   6   6
+-  2   2   6   2   2   6  10  10  10  30  30  30
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  58  58  58  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 38  38  38  78  78  78   6   6   6   2   2   6
+-  2   2   6  46  46  46  14  14  14  42  42  42
+-246 246 246 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234  10  10  10
+-  2   2   6   2   2   6  22  22  22  14  14  14
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  62  62  62  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50  74  74  74   2   2   6   2   2   6
+- 14  14  14  70  70  70  34  34  34  62  62  62
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234  14  14  14
+-  2   2   6   2   2   6  30  30  30   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  62  62  62  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 54  54  54  62  62  62   2   2   6   2   2   6
+-  2   2   6  30  30  30  46  46  46  70  70  70
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 226 226 226  10  10  10
+-  2   2   6   6   6   6  30  30  30   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6  66  66  66  58  58  58  22  22  22
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  22  22  22
+- 58  58  58  62  62  62   2   2   6   2   2   6
+-  2   2   6   2   2   6  30  30  30  78  78  78
+-250 250 250 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 206 206 206   2   2   6
+- 22  22  22  34  34  34  18  14   6  22  22  22
+- 26  26  26  18  18  18   6   6   6   2   2   6
+-  2   2   6  82  82  82  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  26  26  26
+- 62  62  62 106 106 106  74  54  14 185 133  11
+-210 162  10 121  92   8   6   6   6  62  62  62
+-238 238 238 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 246 246 246
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 158 158 158  18  18  18
+- 14  14  14   2   2   6   2   2   6   2   2   6
+-  6   6   6  18  18  18  66  66  66  38  38  38
+-  6   6   6  94  94  94  50  50  50  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 10  10  10  10  10  10  18  18  18  38  38  38
+- 78  78  78 142 134 106 216 158  10 242 186  14
+-246 190  14 246 190  14 156 118  10  10  10  10
+- 90  90  90 238 238 238 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 250 250 250
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 246 230 190
+-238 204  91 238 204  91 181 142  44  37  26   9
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  38  38  38  46  46  46
+- 26  26  26 106 106 106  54  54  54  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  14  14  14  22  22  22
+- 30  30  30  38  38  38  50  50  50  70  70  70
+-106 106 106 190 142  34 226 170  11 242 186  14
+-246 190  14 246 190  14 246 190  14 154 114  10
+-  6   6   6  74  74  74 226 226 226 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 231 231 231 250 250 250
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 228 184  62
+-241 196  14 241 208  19 232 195  16  38  30  10
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  30  30  30  26  26  26
+-203 166  17 154 142  90  66  66  66  26  26  26
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  18  18  18  38  38  38  58  58  58
+- 78  78  78  86  86  86 101 101 101 123 123 123
+-175 146  61 210 150  10 234 174  13 246 186  14
+-246 190  14 246 190  14 246 190  14 238 190  10
+-102  78  10   2   2   6  46  46  46 198 198 198
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 234 234 234 242 242 242
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 224 178  62
+-242 186  14 241 196  14 210 166  10  22  18   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   6   6   6 121  92   8
+-238 202  15 232 195  16  82  82  82  34  34  34
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+- 14  14  14  38  38  38  70  70  70 154 122  46
+-190 142  34 200 144  11 197 138  11 197 138  11
+-213 154  11 226 170  11 242 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-225 175  15  46  32   6   2   2   6  22  22  22
+-158 158 158 250 250 250 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 242 242 242 224 178  62
+-239 182  13 236 186  11 213 154  11  46  32   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  61  42   6 225 175  15
+-238 190  10 236 186  11 112 100  78  42  42  42
+- 14  14  14   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 22  22  22  54  54  54 154 122  46 213 154  11
+-226 170  11 230 174  11 226 170  11 226 170  11
+-236 178  12 242 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-241 196  14 184 144  12  10  10  10   2   2   6
+-  6   6   6 116 116 116 242 242 242 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 231 231 231 198 198 198 214 170  54
+-236 178  12 236 178  12 210 150  10 137  92   6
+- 18  14   6   2   2   6   2   2   6   2   2   6
+-  6   6   6  70  47   6 200 144  11 236 178  12
+-239 182  13 239 182  13 124 112  88  58  58  58
+- 22  22  22   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  70  70  70 180 133  36 226 170  11
+-239 182  13 242 186  14 242 186  14 246 186  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 232 195  16  98  70   6   2   2   6
+-  2   2   6   2   2   6  66  66  66 221 221 221
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 206 206 206 198 198 198 214 166  58
+-230 174  11 230 174  11 216 158  10 192 133   9
+-163 110   8 116  81   8 102  78  10 116  81   8
+-167 114   7 197 138  11 226 170  11 239 182  13
+-242 186  14 242 186  14 162 146  94  78  78  78
+- 34  34  34  14  14  14   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 30  30  30  78  78  78 190 142  34 226 170  11
+-239 182  13 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 241 196  14 203 166  17  22  18   6
+-  2   2   6   2   2   6   2   2   6  38  38  38
+-218 218 218 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 206 206 206 198 198 198 202 162  69
+-226 170  11 236 178  12 224 166  10 210 150  10
+-200 144  11 197 138  11 192 133   9 197 138  11
+-210 150  10 226 170  11 242 186  14 246 190  14
+-246 190  14 246 186  14 225 175  15 124 112  88
+- 62  62  62  30  30  30  14  14  14   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  78  78  78 174 135  50 224 166  10
+-239 182  13 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 241 196  14 139 102  15
+-  2   2   6   2   2   6   2   2   6   2   2   6
+- 78  78  78 250 250 250 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 214 214 214 198 198 198 190 150  46
+-219 162  10 236 178  12 234 174  13 224 166  10
+-216 158  10 213 154  11 213 154  11 216 158  10
+-226 170  11 239 182  13 246 190  14 246 190  14
+-246 190  14 246 190  14 242 186  14 206 162  42
+-101 101 101  58  58  58  30  30  30  14  14  14
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  74  74  74 174 135  50 216 158  10
+-236 178  12 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 241 196  14 226 184  13
+- 61  42   6   2   2   6   2   2   6   2   2   6
+- 22  22  22 238 238 238 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 226 226 226 187 187 187 180 133  36
+-216 158  10 236 178  12 239 182  13 236 178  12
+-230 174  11 226 170  11 226 170  11 230 174  11
+-236 178  12 242 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 186  14 239 182  13
+-206 162  42 106 106 106  66  66  66  34  34  34
+- 14  14  14   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 26  26  26  70  70  70 163 133  67 213 154  11
+-236 178  12 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 241 196  14
+-190 146  13  18  14   6   2   2   6   2   2   6
+- 46  46  46 246 246 246 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 221 221 221  86  86  86 156 107  11
+-216 158  10 236 178  12 242 186  14 246 186  14
+-242 186  14 239 182  13 239 182  13 242 186  14
+-242 186  14 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-242 186  14 225 175  15 142 122  72  66  66  66
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 26  26  26  70  70  70 163 133  67 210 150  10
+-236 178  12 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-232 195  16 121  92   8  34  34  34 106 106 106
+-221 221 221 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-242 242 242  82  82  82  18  14   6 163 110   8
+-216 158  10 236 178  12 242 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 242 186  14 163 133  67
+- 46  46  46  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  10  10  10
+- 30  30  30  78  78  78 163 133  67 210 150  10
+-236 178  12 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-241 196  14 215 174  15 190 178 144 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 218 218 218
+- 58  58  58   2   2   6  22  18   6 167 114   7
+-216 158  10 236 178  12 246 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 186  14 242 186  14 190 150  46
+- 54  54  54  22  22  22   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 38  38  38  86  86  86 180 133  36 213 154  11
+-236 178  12 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 232 195  16 190 146  13 214 214 214
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 170 170 170  26  26  26
+-  2   2   6   2   2   6  37  26   9 163 110   8
+-219 162  10 239 182  13 246 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 186  14 236 178  12 224 166  10 142 122  72
+- 46  46  46  18  18  18   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50 109 106  95 192 133   9 224 166  10
+-242 186  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-242 186  14 226 184  13 210 162  10 142 110  46
+-226 226 226 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-198 198 198  66  66  66   2   2   6   2   2   6
+-  2   2   6   2   2   6  50  34   6 156 107  11
+-219 162  10 239 182  13 246 186  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 242 186  14
+-234 174  13 213 154  11 154 122  46  66  66  66
+- 30  30  30  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  22  22  22
+- 58  58  58 154 121  60 206 145  10 234 174  13
+-242 186  14 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 186  14 236 178  12 210 162  10 163 110   8
+- 61  42   6 138 138 138 218 218 218 250 250 250
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 210 210 210 144 144 144  66  66  66
+-  6   6   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6  61  42   6 163 110   8
+-216 158  10 236 178  12 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 239 182  13 230 174  11 216 158  10
+-190 142  34 124 112  88  70  70  70  38  38  38
+- 18  18  18   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  22  22  22
+- 62  62  62 168 124  44 206 145  10 224 166  10
+-236 178  12 239 182  13 242 186  14 242 186  14
+-246 186  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 236 178  12 216 158  10 175 118   6
+- 80  54   7   2   2   6   6   6   6  30  30  30
+- 54  54  54  62  62  62  50  50  50  38  38  38
+- 14  14  14   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  80  54   7 167 114   7
+-213 154  11 236 178  12 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 190  14 242 186  14 239 182  13 239 182  13
+-230 174  11 210 150  10 174 135  50 124 112  88
+- 82  82  82  54  54  54  34  34  34  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  18  18  18
+- 50  50  50 158 118  36 192 133   9 200 144  11
+-216 158  10 219 162  10 224 166  10 226 170  11
+-230 174  11 236 178  12 239 182  13 239 182  13
+-242 186  14 246 186  14 246 190  14 246 190  14
+-246 190  14 246 190  14 246 190  14 246 190  14
+-246 186  14 230 174  11 210 150  10 163 110   8
+-104  69   6  10  10  10   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  91  60   6 167 114   7
+-206 145  10 230 174  11 242 186  14 246 190  14
+-246 190  14 246 190  14 246 186  14 242 186  14
+-239 182  13 230 174  11 224 166  10 213 154  11
+-180 133  36 124 112  88  86  86  86  58  58  58
+- 38  38  38  22  22  22  10  10  10   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0  14  14  14
+- 34  34  34  70  70  70 138 110  50 158 118  36
+-167 114   7 180 123   7 192 133   9 197 138  11
+-200 144  11 206 145  10 213 154  11 219 162  10
+-224 166  10 230 174  11 239 182  13 242 186  14
+-246 186  14 246 186  14 246 186  14 246 186  14
+-239 182  13 216 158  10 185 133  11 152  99   6
+-104  69   6  18  14   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   2   2   6   2   2   6   2   2   6
+-  2   2   6   6   6   6  80  54   7 152  99   6
+-192 133   9 219 162  10 236 178  12 239 182  13
+-246 186  14 242 186  14 239 182  13 236 178  12
+-224 166  10 206 145  10 192 133   9 154 121  60
+- 94  94  94  62  62  62  42  42  42  22  22  22
+- 14  14  14   6   6   6   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 18  18  18  34  34  34  58  58  58  78  78  78
+-101  98  89 124 112  88 142 110  46 156 107  11
+-163 110   8 167 114   7 175 118   6 180 123   7
+-185 133  11 197 138  11 210 150  10 219 162  10
+-226 170  11 236 178  12 236 178  12 234 174  13
+-219 162  10 197 138  11 163 110   8 130  83   6
+- 91  60   6  10  10  10   2   2   6   2   2   6
+- 18  18  18  38  38  38  38  38  38  38  38  38
+- 38  38  38  38  38  38  38  38  38  38  38  38
+- 38  38  38  38  38  38  26  26  26   2   2   6
+-  2   2   6   6   6   6  70  47   6 137  92   6
+-175 118   6 200 144  11 219 162  10 230 174  11
+-234 174  13 230 174  11 219 162  10 210 150  10
+-192 133   9 163 110   8 124 112  88  82  82  82
+- 50  50  50  30  30  30  14  14  14   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  14  14  14  22  22  22  34  34  34
+- 42  42  42  58  58  58  74  74  74  86  86  86
+-101  98  89 122 102  70 130  98  46 121  87  25
+-137  92   6 152  99   6 163 110   8 180 123   7
+-185 133  11 197 138  11 206 145  10 200 144  11
+-180 123   7 156 107  11 130  83   6 104  69   6
+- 50  34   6  54  54  54 110 110 110 101  98  89
+- 86  86  86  82  82  82  78  78  78  78  78  78
+- 78  78  78  78  78  78  78  78  78  78  78  78
+- 78  78  78  82  82  82  86  86  86  94  94  94
+-106 106 106 101 101 101  86  66  34 124  80   6
+-156 107  11 180 123   7 192 133   9 200 144  11
+-206 145  10 200 144  11 192 133   9 175 118   6
+-139 102  15 109 106  95  70  70  70  42  42  42
+- 22  22  22  10  10  10   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   6   6   6  10  10  10
+- 14  14  14  22  22  22  30  30  30  38  38  38
+- 50  50  50  62  62  62  74  74  74  90  90  90
+-101  98  89 112 100  78 121  87  25 124  80   6
+-137  92   6 152  99   6 152  99   6 152  99   6
+-138  86   6 124  80   6  98  70   6  86  66  30
+-101  98  89  82  82  82  58  58  58  46  46  46
+- 38  38  38  34  34  34  34  34  34  34  34  34
+- 34  34  34  34  34  34  34  34  34  34  34  34
+- 34  34  34  34  34  34  38  38  38  42  42  42
+- 54  54  54  82  82  82  94  86  76  91  60   6
+-134  86   6 156 107  11 167 114   7 175 118   6
+-175 118   6 167 114   7 152  99   6 121  87  25
+-101  98  89  62  62  62  34  34  34  18  18  18
+-  6   6   6   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6   6   6   6  10  10  10
+- 18  18  18  22  22  22  30  30  30  42  42  42
+- 50  50  50  66  66  66  86  86  86 101  98  89
+-106  86  58  98  70   6 104  69   6 104  69   6
+-104  69   6  91  60   6  82  62  34  90  90  90
+- 62  62  62  38  38  38  22  22  22  14  14  14
+- 10  10  10  10  10  10  10  10  10  10  10  10
+- 10  10  10  10  10  10   6   6   6  10  10  10
+- 10  10  10  10  10  10  10  10  10  14  14  14
+- 22  22  22  42  42  42  70  70  70  89  81  66
+- 80  54   7 104  69   6 124  80   6 137  92   6
+-134  86   6 116  81   8 100  82  52  86  86  86
+- 58  58  58  30  30  30  14  14  14   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  10  10  10  14  14  14
+- 18  18  18  26  26  26  38  38  38  54  54  54
+- 70  70  70  86  86  86  94  86  76  89  81  66
+- 89  81  66  86  86  86  74  74  74  50  50  50
+- 30  30  30  14  14  14   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6  18  18  18  34  34  34  58  58  58
+- 82  82  82  89  81  66  89  81  66  89  81  66
+- 94  86  66  94  86  76  74  74  74  50  50  50
+- 26  26  26  14  14  14   6   6   6   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  6   6   6   6   6   6  14  14  14  18  18  18
+- 30  30  30  38  38  38  46  46  46  54  54  54
+- 50  50  50  42  42  42  30  30  30  18  18  18
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   6   6   6  14  14  14  26  26  26
+- 38  38  38  50  50  50  58  58  58  58  58  58
+- 54  54  54  42  42  42  30  30  30  18  18  18
+- 10  10  10   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+-  6   6   6  10  10  10  14  14  14  18  18  18
+- 18  18  18  14  14  14  10  10  10   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   6   6   6
+- 14  14  14  18  18  18  22  22  22  22  22  22
+- 18  18  18  14  14  14  10  10  10   6   6   6
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
+-  0   0   0   0   0   0   0   0   0   0   0   0
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  3 3 3  0 0 0  0 0 0
++0 0 0  0 0 0  0 0 0  0 0 0  3 3 3  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  1 1 1  0 0 0
++0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  2 1 0  2 1 0  3 2 2
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  2 2 2  0 0 0  3 4 3  26 28 28
++37 38 37  37 38 37  14 17 19  2 2 2  0 0 0  2 2 2
++5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  3 3 3  0 0 0  1 1 1  6 6 6
++2 2 2  0 0 0  3 3 3  4 4 4  4 4 4  4 4 4
++4 4 5  3 3 3  1 0 0  0 0 0  1 0 0  0 0 0
++1 1 1  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++2 2 2  0 0 0  0 0 0  14 17 19  60 74 84  137 136 137
++153 152 153  137 136 137  125 124 125  60 73 81  6 6 6  3 1 0
++0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  0 0 0  4 4 4  41 54 63  125 124 125
++60 73 81  6 6 6  4 0 0  3 3 3  4 4 4  4 4 4
++4 4 4  0 0 0  6 9 11  41 54 63  41 65 82  22 30 35
++2 2 2  2 1 0  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  5 5 5  5 5 5  2 2 2  0 0 0
++4 0 0  6 6 6  41 54 63  137 136 137  174 174 174  167 166 167
++165 164 165  165 164 165  163 162 163  163 162 163  125 124 125  41 54 63
++1 1 1  0 0 0  0 0 0  3 3 3  5 5 5  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
++3 3 3  2 0 0  4 0 0  60 73 81  156 155 156  167 166 167
++163 162 163  85 115 134  5 7 8  0 0 0  4 4 4  5 5 5
++0 0 0  2 5 5  55 98 126  90 154 193  90 154 193  72 125 159
++37 51 59  2 0 0  1 1 1  4 5 5  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  4 4 4  1 1 1  0 0 0  3 3 3
++37 38 37  125 124 125  163 162 163  174 174 174  158 157 158  158 157 158
++156 155 156  156 155 156  158 157 158  165 164 165  174 174 174  166 165 166
++125 124 125  16 19 21  1 0 0  0 0 0  0 0 0  4 4 4
++5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  1 1 1
++0 0 0  0 0 0  37 38 37  153 152 153  174 174 174  158 157 158
++174 174 174  163 162 163  37 38 37  4 3 3  4 0 0  1 1 1
++0 0 0  22 40 52  101 161 196  101 161 196  90 154 193  101 161 196
++64 123 161  14 17 19  0 0 0  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
++5 5 5  2 2 2  0 0 0  4 0 0  24 26 27  85 115 134
++156 155 156  174 174 174  167 166 167  156 155 156  154 153 154  157 156 157
++156 155 156  156 155 156  155 154 155  153 152 153  158 157 158  167 166 167
++174 174 174  156 155 156  60 74 84  16 19 21  0 0 0  0 0 0
++1 1 1  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  6 6 6  3 3 3  0 0 0  4 0 0
++13 16 17  60 73 81  137 136 137  165 164 165  156 155 156  153 152 153
++174 174 174  177 184 187  60 73 81  3 1 0  0 0 0  1 1 2
++22 30 35  64 123 161  136 185 209  90 154 193  90 154 193  90 154 193
++90 154 193  21 29 34  0 0 0  3 2 2  4 4 5  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  3 3 3
++0 0 0  0 0 0  10 13 16  60 74 84  157 156 157  174 174 174
++174 174 174  158 157 158  153 152 153  154 153 154  156 155 156  155 154 155
++156 155 156  155 154 155  154 153 154  157 156 157  154 153 154  153 152 153
++163 162 163  174 174 174  177 184 187  137 136 137  60 73 81  13 16 17
++4 0 0  0 0 0  3 3 3  5 5 5  4 4 4  4 4 4
++5 5 5  4 4 4  1 1 1  0 0 0  3 3 3  41 54 63
++131 129 131  174 174 174  174 174 174  174 174 174  167 166 167  174 174 174
++190 197 201  137 136 137  24 26 27  4 0 0  16 21 25  50 82 103
++90 154 193  136 185 209  90 154 193  101 161 196  101 161 196  101 161 196
++31 91 132  3 6 7  0 0 0  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  2 2 2  0 0 0  4 0 0
++4 0 0  43 57 68  137 136 137  177 184 187  174 174 174  163 162 163
++155 154 155  155 154 155  156 155 156  155 154 155  158 157 158  165 164 165
++167 166 167  166 165 166  163 162 163  157 156 157  155 154 155  155 154 155
++153 152 153  156 155 156  167 166 167  174 174 174  174 174 174  131 129 131
++41 54 63  5 5 5  0 0 0  0 0 0  3 3 3  4 4 4
++1 1 1  0 0 0  1 0 0  26 28 28  125 124 125  174 174 174
++177 184 187  174 174 174  174 174 174  156 155 156  131 129 131  137 136 137
++125 124 125  24 26 27  4 0 0  41 65 82  90 154 193  136 185 209
++136 185 209  101 161 196  53 118 160  37 112 160  90 154 193  34 86 122
++7 12 15  0 0 0  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  3 3 3  0 0 0  0 0 0  5 5 5  37 38 37
++125 124 125  167 166 167  174 174 174  167 166 167  158 157 158  155 154 155
++156 155 156  156 155 156  156 155 156  163 162 163  167 166 167  155 154 155
++137 136 137  153 152 153  156 155 156  165 164 165  163 162 163  156 155 156
++156 155 156  156 155 156  155 154 155  158 157 158  166 165 166  174 174 174
++167 166 167  125 124 125  37 38 37  1 0 0  0 0 0  0 0 0
++0 0 0  24 26 27  60 74 84  158 157 158  174 174 174  174 174 174
++166 165 166  158 157 158  125 124 125  41 54 63  13 16 17  6 6 6
++6 6 6  37 38 37  80 127 157  136 185 209  101 161 196  101 161 196
++90 154 193  28 67 93  6 10 14  13 20 25  13 20 25  6 10 14
++1 1 2  4 3 3  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++1 1 1  1 0 0  4 3 3  37 38 37  60 74 84  153 152 153
++167 166 167  167 166 167  158 157 158  154 153 154  155 154 155  156 155 156
++157 156 157  158 157 158  167 166 167  167 166 167  131 129 131  43 57 68
++26 28 28  37 38 37  60 73 81  131 129 131  165 164 165  166 165 166
++158 157 158  155 154 155  156 155 156  156 155 156  156 155 156  158 157 158
++165 164 165  174 174 174  163 162 163  60 74 84  16 19 21  13 16 17
++60 73 81  131 129 131  174 174 174  174 174 174  167 166 167  165 164 165
++137 136 137  60 73 81  24 26 27  4 0 0  4 0 0  16 19 21
++52 104 138  101 161 196  136 185 209  136 185 209  90 154 193  27 99 146
++13 20 25  4 5 7  2 5 5  4 5 7  1 1 2  0 0 0
++4 4 4  4 4 4  3 3 3  2 2 2  2 2 2  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  3 3 3  0 0 0
++0 0 0  13 16 17  60 73 81  137 136 137  174 174 174  166 165 166
++158 157 158  156 155 156  157 156 157  156 155 156  155 154 155  158 157 158
++167 166 167  174 174 174  153 152 153  60 73 81  16 19 21  4 0 0
++4 0 0  4 0 0  6 6 6  26 28 28  60 74 84  158 157 158
++174 174 174  166 165 166  157 156 157  155 154 155  156 155 156  156 155 156
++155 154 155  158 157 158  167 166 167  167 166 167  131 129 131  125 124 125
++137 136 137  167 166 167  167 166 167  174 174 174  158 157 158  125 124 125
++16 19 21  4 0 0  4 0 0  10 13 16  49 76 92  107 159 188
++136 185 209  136 185 209  90 154 193  26 108 161  22 40 52  6 10 14
++2 3 3  1 1 2  1 1 2  4 4 5  4 4 5  4 4 5
++4 4 5  2 2 1  0 0 0  0 0 0  0 0 0  2 2 2
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  3 3 3  0 0 0  1 0 0  4 0 0
++37 51 59  131 129 131  167 166 167  167 166 167  163 162 163  157 156 157
++157 156 157  155 154 155  153 152 153  157 156 157  167 166 167  174 174 174
++153 152 153  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
++4 3 3  4 3 3  4 0 0  6 6 6  4 0 0  37 38 37
++125 124 125  174 174 174  174 174 174  165 164 165  156 155 156  154 153 154
++156 155 156  156 155 156  155 154 155  163 162 163  158 157 158  163 162 163
++174 174 174  174 174 174  174 174 174  125 124 125  37 38 37  0 0 0
++4 0 0  6 9 11  41 54 63  90 154 193  136 185 209  146 190 211
++136 185 209  37 112 160  22 40 52  6 10 14  3 6 7  1 1 2
++1 1 2  3 3 3  1 1 2  3 3 3  4 4 4  4 4 4
++2 2 2  2 0 0  16 19 21  37 38 37  24 26 27  0 0 0
++0 0 0  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
++4 4 4  0 0 0  0 0 0  0 0 0  26 28 28  120 125 127
++158 157 158  174 174 174  165 164 165  157 156 157  155 154 155  156 155 156
++153 152 153  153 152 153  167 166 167  174 174 174  174 174 174  125 124 125
++37 38 37  4 0 0  0 0 0  4 0 0  4 3 3  4 4 4
++4 4 4  4 4 4  5 5 5  4 0 0  4 0 0  4 0 0
++4 3 3  43 57 68  137 136 137  174 174 174  174 174 174  165 164 165
++154 153 154  153 152 153  153 152 153  153 152 153  163 162 163  174 174 174
++174 174 174  153 152 153  60 73 81  6 6 6  4 0 0  4 3 3
++32 43 50  80 127 157  136 185 209  146 190 211  146 190 211  90 154 193
++28 67 93  28 67 93  40 71 93  3 6 7  1 1 2  2 5 5
++50 82 103  79 117 143  26 37 45  0 0 0  3 3 3  1 1 1
++0 0 0  41 54 63  137 136 137  174 174 174  153 152 153  60 73 81
++2 0 0  0 0 0
++4 4 4  4 4 4  4 4 4  4 4 4  6 6 6  2 2 2
++0 0 0  2 0 0  24 26 27  60 74 84  153 152 153  174 174 174
++174 174 174  157 156 157  154 153 154  156 155 156  154 153 154  153 152 153
++165 164 165  174 174 174  177 184 187  137 136 137  43 57 68  6 6 6
++4 0 0  2 0 0  3 3 3  5 5 5  5 5 5  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  6 6 6  4 3 3
++4 0 0  4 0 0  24 26 27  60 73 81  153 152 153  174 174 174
++174 174 174  158 157 158  158 157 158  174 174 174  174 174 174  158 157 158
++60 74 84  24 26 27  4 0 0  4 0 0  17 23 27  59 113 148
++136 185 209  191 222 234  146 190 211  136 185 209  31 91 132  7 11 13
++22 40 52  101 161 196  90 154 193  6 9 11  3 4 4  43 95 132
++136 185 209  172 205 220  55 98 126  0 0 0  0 0 0  2 0 0
++26 28 28  153 152 153  177 184 187  167 166 167  177 184 187  165 164 165
++37 38 37  0 0 0
++4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
++13 16 17  60 73 81  137 136 137  174 174 174  174 174 174  165 164 165
++153 152 153  153 152 153  155 154 155  154 153 154  158 157 158  174 174 174
++177 184 187  163 162 163  60 73 81  16 19 21  4 0 0  4 0 0
++4 3 3  4 4 4  5 5 5  5 5 5  4 4 4  5 5 5
++5 5 5  5 5 5  5 5 5  4 4 4  4 4 4  5 5 5
++6 6 6  4 0 0  4 0 0  4 0 0  24 26 27  60 74 84
++166 165 166  174 174 174  177 184 187  165 164 165  125 124 125  24 26 27
++4 0 0  4 0 0  5 5 5  50 82 103  136 185 209  172 205 220
++146 190 211  136 185 209  26 108 161  22 40 52  7 12 15  44 81 103
++71 116 144  28 67 93  37 51 59  41 65 82  100 139 164  101 161 196
++90 154 193  90 154 193  28 67 93  0 0 0  0 0 0  26 28 28
++125 124 125  167 166 167  163 162 163  153 152 153  163 162 163  174 174 174
++85 115 134  4 0 0
++4 4 4  5 5 5  4 4 4  1 0 0  4 0 0  34 47 55
++125 124 125  174 174 174  174 174 174  167 166 167  157 156 157  153 152 153
++155 154 155  155 154 155  158 157 158  166 165 166  167 166 167  154 153 154
++125 124 125  26 28 28  4 0 0  4 0 0  4 0 0  5 5 5
++5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  1 1 1
++0 0 0  0 0 0  1 1 1  4 4 4  4 4 4  4 4 4
++5 5 5  5 5 5  4 3 3  4 0 0  4 0 0  6 6 6
++37 38 37  131 129 131  137 136 137  37 38 37  0 0 0  4 0 0
++4 5 5  43 61 72  90 154 193  172 205 220  146 190 211  136 185 209
++90 154 193  28 67 93  13 20 25  43 61 72  71 116 144  44 81 103
++2 5 5  7 11 13  59 113 148  101 161 196  90 154 193  28 67 93
++13 20 25  6 10 14  0 0 0  13 16 17  60 73 81  137 136 137
++166 165 166  158 157 158  156 155 156  154 153 154  167 166 167  174 174 174
++60 73 81  4 0 0
++4 4 4  4 4 4  0 0 0  3 3 3  60 74 84  174 174 174
++174 174 174  167 166 167  163 162 163  155 154 155  157 156 157  155 154 155
++156 155 156  163 162 163  167 166 167  158 157 158  125 124 125  37 38 37
++4 3 3  4 0 0  4 0 0  6 6 6  6 6 6  5 5 5
++4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  2 3 3
++10 13 16  7 11 13  1 0 0  0 0 0  2 2 1  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  4 0 0
++4 0 0  7 11 13  13 16 17  4 0 0  3 3 3  34 47 55
++80 127 157  146 190 211  172 205 220  136 185 209  136 185 209  136 185 209
++28 67 93  22 40 52  55 98 126  55 98 126  21 29 34  7 11 13
++50 82 103  101 161 196  101 161 196  35 83 115  13 20 25  2 2 1
++1 1 2  1 1 2  37 51 59  131 129 131  174 174 174  174 174 174
++167 166 167  163 162 163  163 162 163  167 166 167  174 174 174  125 124 125
++16 19 21  4 0 0
++4 4 4  4 0 0  4 0 0  60 74 84  174 174 174  174 174 174
++158 157 158  155 154 155  155 154 155  156 155 156  155 154 155  158 157 158
++167 166 167  165 164 165  131 129 131  60 73 81  13 16 17  4 0 0
++4 0 0  4 3 3  6 6 6  4 3 3  5 5 5  4 4 4
++4 4 4  3 2 2  0 0 0  0 0 0  7 11 13  45 69 86
++80 127 157  71 116 144  43 61 72  7 11 13  0 0 0  1 1 1
++4 3 3  4 4 4  4 4 4  4 4 4  6 6 6  5 5 5
++3 2 2  4 0 0  1 0 0  21 29 34  59 113 148  136 185 209
++146 190 211  136 185 209  136 185 209  136 185 209  136 185 209  136 185 209
++68 124 159  44 81 103  22 40 52  13 16 17  43 61 72  90 154 193
++136 185 209  59 113 148  21 29 34  3 4 3  1 1 1  0 0 0
++24 26 27  125 124 125  163 162 163  174 174 174  166 165 166  165 164 165
++163 162 163  125 124 125  125 124 125  125 124 125  125 124 125  26 28 28
++4 0 0  4 3 3
++3 3 3  0 0 0  24 26 27  153 152 153  177 184 187  158 157 158
++156 155 156  156 155 156  155 154 155  155 154 155  165 164 165  174 174 174
++155 154 155  60 74 84  26 28 28  4 0 0  4 0 0  3 1 0
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 3 3
++2 0 0  0 0 0  0 0 0  32 43 50  72 125 159  101 161 196
++136 185 209  101 161 196  101 161 196  79 117 143  32 43 50  0 0 0
++0 0 0  2 2 2  4 4 4  4 4 4  3 3 3  1 0 0
++0 0 0  4 5 5  49 76 92  101 161 196  146 190 211  146 190 211
++136 185 209  136 185 209  136 185 209  136 185 209  136 185 209  90 154 193
++28 67 93  13 16 17  37 51 59  80 127 157  136 185 209  90 154 193
++22 40 52  6 9 11  3 4 3  2 2 1  16 19 21  60 73 81
++137 136 137  163 162 163  158 157 158  166 165 166  167 166 167  153 152 153
++60 74 84  37 38 37  6 6 6  13 16 17  4 0 0  1 0 0
++3 2 2  4 4 4
++3 2 2  4 0 0  37 38 37  137 136 137  167 166 167  158 157 158
++157 156 157  154 153 154  157 156 157  167 166 167  174 174 174  125 124 125
++37 38 37  4 0 0  4 0 0  4 0 0  4 3 3  4 4 4
++4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
++0 0 0  16 21 25  55 98 126  90 154 193  136 185 209  101 161 196
++101 161 196  101 161 196  136 185 209  136 185 209  101 161 196  55 98 126
++14 17 19  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
++22 40 52  90 154 193  146 190 211  146 190 211  136 185 209  136 185 209
++136 185 209  136 185 209  136 185 209  101 161 196  35 83 115  7 11 13
++17 23 27  59 113 148  136 185 209  101 161 196  34 86 122  7 12 15
++2 5 5  3 4 3  6 6 6  60 73 81  131 129 131  163 162 163
++166 165 166  174 174 174  174 174 174  163 162 163  125 124 125  41 54 63
++13 16 17  4 0 0  4 0 0  4 0 0  1 0 0  2 2 2
++4 4 4  4 4 4
++1 1 1  2 1 0  43 57 68  137 136 137  153 152 153  153 152 153
++163 162 163  156 155 156  165 164 165  167 166 167  60 74 84  6 6 6
++4 0 0  4 0 0  5 5 5  4 4 4  4 4 4  4 4 4
++4 5 5  6 6 6  4 3 3  0 0 0  0 0 0  11 15 18
++40 71 93  100 139 164  101 161 196  101 161 196  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  136 185 209
++101 161 196  45 69 86  6 6 6  0 0 0  17 23 27  55 98 126
++136 185 209  146 190 211  136 185 209  136 185 209  136 185 209  136 185 209
++136 185 209  136 185 209  90 154 193  22 40 52  7 11 13  50 82 103
++136 185 209  136 185 209  53 118 160  22 40 52  7 11 13  2 5 5
++3 4 3  37 38 37  125 124 125  157 156 157  166 165 166  167 166 167
++174 174 174  174 174 174  137 136 137  60 73 81  4 0 0  4 0 0
++4 0 0  4 0 0  5 5 5  3 3 3  3 3 3  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  41 54 63  137 136 137  125 124 125  131 129 131
++155 154 155  167 166 167  174 174 174  60 74 84  6 6 6  4 0 0
++4 3 3  6 6 6  4 4 4  4 4 4  4 4 4  5 5 5
++4 4 4  1 1 1  0 0 0  3 6 7  41 65 82  72 125 159
++101 161 196  101 161 196  101 161 196  90 154 193  90 154 193  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
++136 185 209  136 185 209  80 127 157  55 98 126  101 161 196  146 190 211
++136 185 209  136 185 209  136 185 209  101 161 196  136 185 209  101 161 196
++136 185 209  101 161 196  35 83 115  22 30 35  101 161 196  172 205 220
++90 154 193  28 67 93  7 11 13  2 5 5  3 4 3  13 16 17
++85 115 134  167 166 167  174 174 174  174 174 174  174 174 174  174 174 174
++167 166 167  60 74 84  13 16 17  4 0 0  4 0 0  4 3 3
++6 6 6  5 5 5  4 4 4  5 5 5  4 4 4  5 5 5
++5 5 5  5 5 5
++1 1 1  4 0 0  41 54 63  137 136 137  137 136 137  125 124 125
++131 129 131  167 166 167  157 156 157  37 38 37  6 6 6  4 0 0
++6 6 6  5 5 5  4 4 4  4 4 4  4 5 5  2 2 1
++0 0 0  0 0 0  26 37 45  58 111 146  101 161 196  101 161 196
++101 161 196  90 154 193  90 154 193  90 154 193  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
++101 161 196  136 185 209  136 185 209  136 185 209  146 190 211  136 185 209
++136 185 209  101 161 196  136 185 209  136 185 209  101 161 196  136 185 209
++101 161 196  136 185 209  136 185 209  136 185 209  136 185 209  16 89 141
++7 11 13  2 5 5  2 5 5  13 16 17  60 73 81  154 154 154
++174 174 174  174 174 174  174 174 174  174 174 174  163 162 163  125 124 125
++24 26 27  4 0 0  4 0 0  4 0 0  5 5 5  5 5 5
++4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
++5 5 5  4 4 4
++4 0 0  6 6 6  37 38 37  137 136 137  137 136 137  131 129 131
++131 129 131  153 152 153  131 129 131  26 28 28  4 0 0  4 3 3
++6 6 6  4 4 4  4 4 4  4 4 4  0 0 0  0 0 0
++13 20 25  51 88 114  90 154 193  101 161 196  101 161 196  90 154 193
++90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  101 161 196
++101 161 196  136 185 209  101 161 196  136 185 209  136 185 209  101 161 196
++136 185 209  101 161 196  136 185 209  101 161 196  101 161 196  101 161 196
++136 185 209  136 185 209  136 185 209  37 112 160  21 29 34  5 7 8
++2 5 5  13 16 17  43 57 68  131 129 131  174 174 174  174 174 174
++174 174 174  167 166 167  157 156 157  125 124 125  37 38 37  4 0 0
++4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++1 1 1  4 0 0  41 54 63  153 152 153  137 136 137  137 136 137
++137 136 137  153 152 153  125 124 125  24 26 27  4 0 0  3 2 2
++4 4 4  4 4 4  4 3 3  4 0 0  3 6 7  43 61 72
++64 123 161  101 161 196  90 154 193  90 154 193  90 154 193  90 154 193
++90 154 193  90 154 193  90 154 193  90 154 193  101 161 196  90 154 193
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
++136 185 209  101 161 196  101 161 196  136 185 209  136 185 209  101 161 196
++101 161 196  90 154 193  28 67 93  13 16 17  7 11 13  3 6 7
++37 51 59  125 124 125  163 162 163  174 174 174  167 166 167  166 165 166
++167 166 167  131 129 131  60 73 81  4 0 0  4 0 0  4 0 0
++3 3 3  5 5 5  6 6 6  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  41 54 63  137 136 137  153 152 153  137 136 137
++153 152 153  157 156 157  125 124 125  24 26 27  0 0 0  2 2 2
++4 4 4  4 4 4  2 0 0  0 0 0  28 67 93  90 154 193
++90 154 193  90 154 193  90 154 193  90 154 193  64 123 161  90 154 193
++90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
++90 154 193  101 161 196  101 161 196  101 161 196  90 154 193  136 185 209
++101 161 196  101 161 196  136 185 209  101 161 196  136 185 209  101 161 196
++101 161 196  101 161 196  136 185 209  101 161 196  101 161 196  90 154 193
++35 83 115  13 16 17  3 6 7  2 5 5  13 16 17  60 74 84
++154 154 154  166 165 166  165 164 165  158 157 158  163 162 163  157 156 157
++60 74 84  13 16 17  4 0 0  4 0 0  3 2 2  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++1 1 1  4 0 0  41 54 63  157 156 157  155 154 155  137 136 137
++153 152 153  158 157 158  137 136 137  26 28 28  2 0 0  2 2 2
++4 4 4  4 4 4  1 0 0  6 10 14  34 86 122  90 154 193
++64 123 161  90 154 193  64 123 161  90 154 193  90 154 193  90 154 193
++64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
++136 185 209  101 161 196  136 185 209  90 154 193  26 108 161  22 40 52
++13 16 17  5 7 8  2 5 5  2 5 5  37 38 37  165 164 165
++174 174 174  163 162 163  154 154 154  165 164 165  167 166 167  60 73 81
++6 6 6  4 0 0  4 0 0  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  41 54 63  156 155 156  158 157 158  153 152 153
++156 155 156  165 164 165  137 136 137  26 28 28  0 0 0  2 2 2
++4 4 5  4 4 4  2 0 0  7 12 15  31 96 139  64 123 161
++90 154 193  64 123 161  90 154 193  90 154 193  64 123 161  90 154 193
++90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
++90 154 193  90 154 193  90 154 193  101 161 196  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
++101 161 196  136 185 209  26 108 161  22 40 52  7 11 13  5 7 8
++2 5 5  2 5 5  2 5 5  2 2 1  37 38 37  158 157 158
++174 174 174  154 154 154  156 155 156  167 166 167  165 164 165  37 38 37
++4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++3 1 0  4 0 0  60 73 81  157 156 157  163 162 163  153 152 153
++158 157 158  167 166 167  137 136 137  26 28 28  2 0 0  2 2 2
++4 5 5  4 4 4  4 0 0  7 12 15  24 86 132  26 108 161
++37 112 160  64 123 161  90 154 193  64 123 161  90 154 193  90 154 193
++90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
++90 154 193  101 161 196  90 154 193  101 161 196  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  136 185 209  101 161 196  136 185 209
++90 154 193  35 83 115  13 16 17  13 16 17  7 11 13  3 6 7
++5 7 8  6 6 6  3 4 3  2 2 1  30 32 34  154 154 154
++167 166 167  154 154 154  154 154 154  174 174 174  165 164 165  37 38 37
++6 6 6  4 0 0  6 6 6  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  41 54 63  163 162 163  166 165 166  154 154 154
++163 162 163  174 174 174  137 136 137  26 28 28  0 0 0  2 2 2
++4 5 5  4 4 5  1 1 2  6 10 14  28 67 93  18 97 151
++18 97 151  18 97 151  26 108 161  37 112 160  37 112 160  90 154 193
++64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
++90 154 193  101 161 196  101 161 196  90 154 193  101 161 196  101 161 196
++101 161 196  101 161 196  101 161 196  136 185 209  90 154 193  16 89 141
++13 20 25  7 11 13  5 7 8  5 7 8  2 5 5  4 5 5
++3 4 3  4 5 5  3 4 3  0 0 0  37 38 37  158 157 158
++174 174 174  158 157 158  158 157 158  167 166 167  174 174 174  41 54 63
++4 0 0  3 2 2  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++1 1 1  4 0 0  60 73 81  165 164 165  174 174 174  158 157 158
++167 166 167  174 174 174  153 152 153  26 28 28  2 0 0  2 2 2
++4 5 5  4 4 4  4 0 0  7 12 15  10 87 144  10 87 144
++18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
++26 108 161  37 112 160  53 118 160  90 154 193  90 154 193  90 154 193
++90 154 193  90 154 193  101 161 196  101 161 196  101 161 196  101 161 196
++101 161 196  136 185 209  90 154 193  26 108 161  22 40 52  13 16 17
++7 11 13  3 6 7  5 7 8  5 7 8  2 5 5  4 5 5
++4 5 5  6 6 6  3 4 3  0 0 0  30 32 34  158 157 158
++174 174 174  156 155 156  155 154 155  165 164 165  154 153 154  37 38 37
++4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  167 166 167  174 174 174  163 162 163
++174 174 174  174 174 174  153 152 153  26 28 28  0 0 0  3 3 3
++5 5 5  4 4 4  1 1 2  7 12 15  28 67 93  18 97 151
++18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
++26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
++90 154 193  26 108 161  90 154 193  90 154 193  90 154 193  101 161 196
++101 161 196  26 108 161  22 40 52  13 16 17  7 11 13  2 5 5
++2 5 5  6 6 6  2 5 5  4 5 5  4 5 5  4 5 5
++3 4 3  5 5 5  3 4 3  2 0 0  30 32 34  137 136 137
++153 152 153  137 136 137  131 129 131  137 136 137  131 129 131  37 38 37
++4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++1 1 1  4 0 0  60 73 81  167 166 167  174 174 174  166 165 166
++174 174 174  177 184 187  153 152 153  30 32 34  1 0 0  3 3 3
++5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
++18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
++26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
++26 108 161  26 108 161  26 108 161  90 154 193  90 154 193  26 108 161
++35 83 115  13 16 17  7 11 13  5 7 8  3 6 7  5 7 8
++2 5 5  6 6 6  4 5 5  4 5 5  3 4 3  4 5 5
++3 4 3  6 6 6  3 4 3  0 0 0  26 28 28  125 124 125
++131 129 131  125 124 125  125 124 125  131 129 131  131 129 131  37 38 37
++4 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++3 1 0  4 0 0  60 73 81  174 174 174  177 184 187  167 166 167
++174 174 174  177 184 187  153 152 153  30 32 34  0 0 0  3 3 3
++5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
++18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
++26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
++26 108 161  90 154 193  26 108 161  26 108 161  24 86 132  13 20 25
++7 11 13  13 20 25  22 40 52  5 7 8  3 4 3  3 4 3
++4 5 5  3 4 3  4 5 5  3 4 3  4 5 5  3 4 3
++4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++1 1 1  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
++174 174 174  190 197 201  157 156 157  30 32 34  1 0 0  3 3 3
++5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
++18 97 151  19 95 150  19 95 150  18 97 151  18 97 151  26 108 161
++18 97 151  26 108 161  26 108 161  26 108 161  26 108 161  90 154 193
++26 108 161  26 108 161  26 108 161  22 40 52  2 5 5  3 4 3
++28 67 93  37 112 160  34 86 122  2 5 5  3 4 3  3 4 3
++3 4 3  3 4 3  3 4 3  2 2 1  3 4 3  4 4 4
++4 5 5  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
++174 174 174  190 197 201  158 157 158  30 32 34  0 0 0  2 2 2
++5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
++10 87 144  19 95 150  19 95 150  18 97 151  18 97 151  18 97 151
++26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
++18 97 151  22 40 52  2 5 5  2 2 1  22 40 52  26 108 161
++90 154 193  37 112 160  22 40 52  3 4 3  13 20 25  22 30 35
++3 6 7  1 1 1  2 2 2  6 9 11  5 5 5  4 3 3
++4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++1 1 1  4 0 0  60 73 81  177 184 187  193 200 203  174 174 174
++177 184 187  193 200 203  163 162 163  30 32 34  4 0 0  2 2 2
++5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
++10 87 144  10 87 144  19 95 150  19 95 150  19 95 150  18 97 151
++26 108 161  26 108 161  26 108 161  90 154 193  26 108 161  28 67 93
++6 10 14  2 5 5  13 20 25  24 86 132  37 112 160  90 154 193
++10 87 144  7 12 15  2 5 5  28 67 93  37 112 160  28 67 93
++2 2 1  7 12 15  35 83 115  28 67 93  3 6 7  1 0 0
++4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  174 174 174  190 197 201  174 174 174
++177 184 187  193 200 203  163 162 163  30 32 34  0 0 0  2 2 2
++5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
++10 87 144  16 89 141  19 95 150  10 87 144  26 108 161  26 108 161
++26 108 161  26 108 161  26 108 161  28 67 93  6 10 14  1 1 2
++7 12 15  28 67 93  26 108 161  16 89 141  24 86 132  21 29 34
++3 4 3  21 29 34  37 112 160  37 112 160  27 99 146  21 29 34
++21 29 34  26 108 161  90 154 193  35 83 115  1 1 2  2 0 0
++4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++3 1 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
++190 197 201  193 200 203  165 164 165  37 38 37  4 0 0  2 2 2
++5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
++10 87 144  10 87 144  16 89 141  18 97 151  18 97 151  10 87 144
++24 86 132  24 86 132  13 20 25  4 5 7  4 5 7  22 40 52
++18 97 151  37 112 160  26 108 161  7 12 15  1 1 1  0 0 0
++28 67 93  37 112 160  26 108 161  28 67 93  22 40 52  28 67 93
++26 108 161  90 154 193  26 108 161  10 87 144  0 0 0  2 0 0
++4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  60 73 81  174 174 174  193 200 203  174 174 174
++190 197 201  193 200 203  165 164 165  30 32 34  0 0 0  2 2 2
++5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
++10 87 144  10 87 144  10 87 144  18 97 151  28 67 93  6 10 14
++0 0 0  1 1 2  4 5 7  13 20 25  16 89 141  26 108 161
++26 108 161  26 108 161  24 86 132  6 9 11  2 3 3  22 40 52
++37 112 160  16 89 141  22 40 52  28 67 93  26 108 161  26 108 161
++90 154 193  26 108 161  26 108 161  28 67 93  1 1 1  4 0 0
++4 4 4  5 5 5  3 3 3  4 0 0  26 28 28  124 126 130
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
++193 200 203  193 200 203  167 166 167  37 38 37  4 0 0  2 2 2
++5 5 5  4 4 4  4 0 0  6 10 14  28 67 93  10 87 144
++10 87 144  10 87 144  18 97 151  10 87 144  13 20 25  4 5 7
++1 1 2  1 1 1  22 40 52  26 108 161  26 108 161  26 108 161
++26 108 161  26 108 161  26 108 161  24 86 132  22 40 52  22 40 52
++22 40 52  22 40 52  10 87 144  26 108 161  26 108 161  26 108 161
++26 108 161  26 108 161  90 154 193  10 87 144  0 0 0  4 0 0
++4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
++190 197 201  205 212 215  167 166 167  30 32 34  0 0 0  2 2 2
++5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
++10 87 144  10 87 144  10 87 144  10 87 144  22 40 52  1 1 2
++2 0 0  1 1 2  24 86 132  26 108 161  26 108 161  26 108 161
++26 108 161  19 95 150  16 89 141  10 87 144  22 40 52  22 40 52
++10 87 144  26 108 161  37 112 160  26 108 161  26 108 161  26 108 161
++26 108 161  26 108 161  26 108 161  28 67 93  2 0 0  3 1 0
++4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
++193 200 203  193 200 203  174 174 174  37 38 37  4 0 0  2 2 2
++5 5 5  4 4 4  3 2 2  1 1 2  13 20 25  10 87 144
++10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  13 20 25
++13 20 25  22 40 52  10 87 144  18 97 151  18 97 151  26 108 161
++10 87 144  13 20 25  6 10 14  21 29 34  24 86 132  18 97 151
++26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
++26 108 161  90 154 193  18 97 151  13 20 25  0 0 0  4 3 3
++4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
++190 197 201  220 221 221  167 166 167  30 32 34  1 0 0  2 2 2
++5 5 5  4 4 4  4 4 5  2 5 5  4 5 7  13 20 25
++28 67 93  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
++10 87 144  10 87 144  18 97 151  10 87 144  18 97 151  18 97 151
++28 67 93  2 3 3  0 0 0  28 67 93  26 108 161  26 108 161
++26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
++26 108 161  10 87 144  13 20 25  1 1 2  3 2 2  4 4 4
++4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
++193 200 203  193 200 203  174 174 174  26 28 28  4 0 0  4 3 3
++5 5 5  4 4 4  4 4 4  4 4 5  1 1 2  2 5 5
++4 5 7  22 40 52  10 87 144  10 87 144  18 97 151  10 87 144
++10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  18 97 151
++10 87 144  28 67 93  22 40 52  10 87 144  26 108 161  18 97 151
++18 97 151  18 97 151  26 108 161  26 108 161  26 108 161  26 108 161
++22 40 52  1 1 2  0 0 0  2 3 3  4 4 4  4 4 4
++4 4 4  5 5 5  4 4 4  0 0 0  26 28 28  131 129 131
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
++190 197 201  220 221 221  190 197 201  41 54 63  4 0 0  2 2 2
++6 6 6  4 4 4  4 4 4  4 4 5  4 4 5  3 3 3
++1 1 2  1 1 2  6 10 14  22 40 52  10 87 144  18 97 151
++18 97 151  10 87 144  10 87 144  10 87 144  18 97 151  10 87 144
++10 87 144  18 97 151  26 108 161  18 97 151  18 97 151  10 87 144
++26 108 161  26 108 161  26 108 161  10 87 144  28 67 93  6 10 14
++1 1 2  1 1 2  4 3 3  4 4 5  4 4 4  4 4 4
++5 5 5  5 5 5  1 1 1  4 0 0  37 51 59  137 136 137
++137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  4 0 0  60 73 81  220 221 221  193 200 203  174 174 174
++193 200 203  193 200 203  220 221 221  137 136 137  13 16 17  4 0 0
++2 2 2  4 4 4  4 4 4  4 4 4  4 4 4  4 4 5
++4 4 5  4 3 3  1 1 2  4 5 7  13 20 25  28 67 93
++10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
++10 87 144  18 97 151  18 97 151  10 87 144  18 97 151  26 108 161
++26 108 161  18 97 151  28 67 93  6 10 14  0 0 0  0 0 0
++2 3 3  4 5 5  4 4 5  4 4 4  4 4 4  5 5 5
++3 3 3  1 1 1  0 0 0  16 19 21  125 124 125  137 136 137
++131 129 131  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
++193 200 203  190 197 201  220 221 221  220 221 221  153 152 153  30 32 34
++0 0 0  0 0 0  2 2 2  4 4 4  4 4 4  4 4 4
++4 4 4  4 5 5  4 5 7  1 1 2  1 1 2  4 5 7
++13 20 25  28 67 93  10 87 144  18 97 151  10 87 144  10 87 144
++10 87 144  10 87 144  10 87 144  18 97 151  26 108 161  18 97 151
++28 67 93  7 12 15  0 0 0  0 0 0  2 2 1  4 4 4
++4 5 5  4 5 5  4 4 4  4 4 4  3 3 3  0 0 0
++0 0 0  0 0 0  37 38 37  125 124 125  158 157 158  131 129 131
++125 124 125  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 3 3  4 0 0  41 54 63  193 200 203  220 221 221  174 174 174
++193 200 203  193 200 203  193 200 203  220 221 221  244 246 246  193 200 203
++120 125 127  5 5 5  1 0 0  0 0 0  1 1 1  4 4 4
++4 4 4  4 4 4  4 5 5  4 5 5  4 4 5  1 1 2
++4 5 7  4 5 7  22 40 52  10 87 144  10 87 144  10 87 144
++10 87 144  10 87 144  18 97 151  10 87 144  10 87 144  13 20 25
++4 5 7  2 3 3  1 1 2  4 4 4  4 5 5  4 4 4
++4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 1 2
++24 26 27  60 74 84  153 152 153  163 162 163  137 136 137  125 124 125
++125 124 125  125 124 125  125 124 125  137 136 137  125 124 125  26 28 28
++0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 0 0  6 6 6  26 28 28  156 155 156  220 221 221  220 221 221
++174 174 174  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
++220 221 221  167 166 167  60 73 81  7 11 13  0 0 0  0 0 0
++3 3 3  4 4 4  4 4 4  4 4 4  4 4 5  4 4 5
++4 4 5  1 1 2  1 1 2  4 5 7  22 40 52  10 87 144
++10 87 144  10 87 144  10 87 144  22 40 52  4 5 7  1 1 2
++1 1 2  4 4 5  4 4 4  4 4 4  4 4 4  4 4 4
++5 5 5  2 2 2  0 0 0  4 0 0  16 19 21  60 73 81
++137 136 137  167 166 167  158 157 158  137 136 137  131 129 131  131 129 131
++125 124 125  125 124 125  131 129 131  155 154 155  60 74 84  5 7 8
++0 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++5 5 5  4 0 0  4 0 0  60 73 81  193 200 203  220 221 221
++193 200 203  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
++220 221 221  220 221 221  220 221 221  137 136 137  43 57 68  6 6 6
++4 0 0  1 1 1  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 5  4 4 5  3 2 2  1 1 2  2 5 5  13 20 25
++22 40 52  22 40 52  13 20 25  2 3 3  1 1 2  3 3 3
++4 5 7  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++1 1 1  0 0 0  2 3 3  41 54 63  131 129 131  166 165 166
++166 165 166  155 154 155  153 152 153  137 136 137  137 136 137  125 124 125
++125 124 125  137 136 137  137 136 137  125 124 125  37 38 37  4 3 3
++4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 3 3  6 6 6  6 6 6  13 16 17  60 73 81  167 166 167
++220 221 221  220 221 221  220 221 221  193 200 203  193 200 203  193 200 203
++205 212 215  220 221 221  220 221 221  244 246 246  205 212 215  125 124 125
++24 26 27  0 0 0  0 0 0  2 2 2  5 5 5  5 5 5
++4 4 4  4 4 4  4 4 4  4 4 5  1 1 2  4 5 7
++4 5 7  4 5 7  1 1 2  3 2 2  4 4 5  4 4 4
++4 4 4  4 4 4  5 5 5  4 4 4  0 0 0  0 0 0
++2 0 0  26 28 28  125 124 125  174 174 174  174 174 174  166 165 166
++156 155 156  153 152 153  137 136 137  137 136 137  131 129 131  137 136 137
++137 136 137  137 136 137  60 74 84  30 32 34  4 0 0  4 0 0
++5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++5 5 5  6 6 6  4 0 0  4 0 0  6 6 6  26 28 28
++125 124 125  174 174 174  220 221 221  220 221 221  220 221 221  193 200 203
++205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
++193 200 203  60 74 84  13 16 17  4 0 0  0 0 0  3 3 3
++5 5 5  5 5 5  4 4 4  4 4 4  4 4 5  3 3 3
++1 1 2  3 3 3  4 4 5  4 4 5  4 4 4  4 4 4
++5 5 5  5 5 5  2 2 2  0 0 0  0 0 0  13 16 17
++60 74 84  174 174 174  193 200 203  174 174 174  167 166 167  163 162 163
++153 152 153  153 152 153  137 136 137  137 136 137  153 152 153  137 136 137
++125 124 125  41 54 63  24 26 27  4 0 0  4 0 0  5 5 5
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 3 3  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
++6 6 6  37 38 37  131 129 131  220 221 221  220 221 221  220 221 221
++193 200 203  193 200 203  220 221 221  205 212 215  220 221 221  244 246 246
++244 246 246  244 246 246  174 174 174  41 54 63  0 0 0  0 0 0
++0 0 0  4 4 4  5 5 5  5 5 5  4 4 4  4 4 5
++4 4 5  4 4 5  4 4 4  4 4 4  6 6 6  6 6 6
++3 3 3  0 0 0  2 0 0  13 16 17  60 73 81  156 155 156
++220 221 221  193 200 203  174 174 174  165 164 165  163 162 163  154 153 154
++153 152 153  153 152 153  158 157 158  163 162 163  137 136 137  60 73 81
++13 16 17  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++5 5 5  4 3 3  4 3 3  6 6 6  6 6 6  6 6 6
++6 6 6  6 6 6  6 6 6  37 38 37  167 166 167  244 246 246
++244 246 246  220 221 221  205 212 215  205 212 215  220 221 221  193 200 203
++220 221 221  244 246 246  244 246 246  244 246 246  137 136 137  37 38 37
++3 2 2  0 0 0  1 1 1  5 5 5  5 5 5  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  4 4 4  1 1 1
++0 0 0  5 5 5  43 57 68  153 152 153  193 200 203  220 221 221
++177 184 187  174 174 174  167 166 167  166 165 166  158 157 158  157 156 157
++158 157 158  166 165 166  156 155 156  85 115 134  13 16 17  4 0 0
++4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++5 5 5  4 3 3  6 6 6  6 6 6  4 0 0  6 6 6
++6 6 6  6 6 6  6 6 6  6 6 6  13 16 17  60 73 81
++177 184 187  220 221 221  220 221 221  220 221 221  205 212 215  220 221 221
++220 221 221  205 212 215  220 221 221  244 246 246  244 246 246  205 212 215
++125 124 125  30 32 34  0 0 0  0 0 0  2 2 2  5 5 5
++4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 0 0
++37 38 37  131 129 131  205 212 215  220 221 221  193 200 203  174 174 174
++174 174 174  174 174 174  167 166 167  165 164 165  166 165 166  167 166 167
++158 157 158  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
++4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  5 5 5  4 3 3  4 3 3  6 6 6  6 6 6
++4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
++26 28 28  125 124 125  205 212 215  220 221 221  220 221 221  220 221 221
++205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
++244 246 246  190 197 201  60 74 84  16 19 21  4 0 0  0 0 0
++0 0 0  0 0 0  0 0 0  0 0 0  16 19 21  120 125 127
++177 184 187  220 221 221  205 212 215  177 184 187  174 174 174  177 184 187
++174 174 174  174 174 174  167 166 167  174 174 174  166 165 166  137 136 137
++60 73 81  13 16 17  4 0 0  4 0 0  4 3 3  6 6 6
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++5 5 5  4 3 3  5 5 5  4 3 3  6 6 6  4 0 0
++6 6 6  6 6 6  4 0 0  6 6 6  4 0 0  6 6 6
++6 6 6  6 6 6  37 38 37  137 136 137  193 200 203  220 221 221
++220 221 221  205 212 215  220 221 221  205 212 215  205 212 215  220 221 221
++220 221 221  220 221 221  244 246 246  166 165 166  43 57 68  2 2 2
++0 0 0  4 0 0  16 19 21  60 73 81  157 156 157  202 210 214
++220 221 221  193 200 203  177 184 187  177 184 187  177 184 187  174 174 174
++174 174 174  174 174 174  174 174 174  157 156 157  60 74 84  24 26 27
++4 0 0  4 0 0  4 0 0  6 6 6  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
++6 6 6  4 0 0  6 6 6  6 6 6  6 6 6  4 0 0
++4 0 0  4 0 0  6 6 6  24 26 27  60 73 81  167 166 167
++220 221 221  220 221 221  220 221 221  205 212 215  205 212 215  205 212 215
++205 212 215  220 221 221  220 221 221  220 221 221  205 212 215  137 136 137
++60 74 84  125 124 125  137 136 137  190 197 201  220 221 221  193 200 203
++177 184 187  177 184 187  177 184 187  174 174 174  174 174 174  177 184 187
++190 197 201  174 174 174  125 124 125  37 38 37  6 6 6  4 0 0
++4 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  5 5 5  5 5 5  4 3 3  6 6 6
++4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  6 6 6
++6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
++125 124 125  193 200 203  244 246 246  220 221 221  205 212 215  205 212 215
++205 212 215  193 200 203  205 212 215  205 212 215  220 221 221  220 221 221
++193 200 203  193 200 203  205 212 215  193 200 203  193 200 203  177 184 187
++190 197 201  190 197 201  174 174 174  190 197 201  193 200 203  190 197 201
++153 152 153  60 73 81  4 0 0  4 0 0  4 0 0  3 2 2
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
++6 6 6  4 3 3  4 3 3  4 3 3  6 6 6  6 6 6
++4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  4 0 0
++4 0 0  26 28 28  131 129 131  220 221 221  244 246 246  220 221 221
++205 212 215  193 200 203  205 212 215  193 200 203  193 200 203  205 212 215
++220 221 221  193 200 203  193 200 203  193 200 203  190 197 201  174 174 174
++174 174 174  190 197 201  193 200 203  193 200 203  167 166 167  125 124 125
++6 6 6  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
++5 5 5  4 3 3  5 5 5  6 6 6  4 3 3  5 5 5
++6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
++4 0 0  4 0 0  6 6 6  41 54 63  158 157 158  220 221 221
++220 221 221  220 221 221  193 200 203  193 200 203  193 200 203  190 197 201
++190 197 201  190 197 201  190 197 201  190 197 201  174 174 174  193 200 203
++193 200 203  220 221 221  174 174 174  125 124 125  37 38 37  4 0 0
++4 0 0  4 3 3  6 6 6  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  4 3 3  4 3 3  4 3 3  5 5 5
++4 3 3  6 6 6  5 5 5  4 3 3  6 6 6  6 6 6
++6 6 6  6 6 6  4 0 0  4 0 0  13 16 17  60 73 81
++174 174 174  220 221 221  220 221 221  205 212 215  190 197 201  174 174 174
++193 200 203  174 174 174  190 197 201  174 174 174  193 200 203  220 221 221
++193 200 203  131 129 131  37 38 37  6 6 6  4 0 0  4 0 0
++6 6 6  6 6 6  4 3 3  5 5 5  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
++5 5 5  4 3 3  4 3 3  5 5 5  4 3 3  4 3 3
++5 5 5  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
++6 6 6  125 124 125  174 174 174  220 221 221  220 221 221  193 200 203
++193 200 203  193 200 203  193 200 203  193 200 203  220 221 221  158 157 158
++60 73 81  6 6 6  4 0 0  4 0 0  5 5 5  6 6 6
++5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
++5 5 5  5 5 5  6 6 6  6 6 6  4 0 0  4 0 0
++4 0 0  4 0 0  26 28 28  125 124 125  174 174 174  193 200 203
++193 200 203  174 174 174  193 200 203  167 166 167  125 124 125  6 6 6
++6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  5 5 5
++4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
++4 3 3  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
++6 6 6  4 0 0  4 0 0  6 6 6  37 38 37  125 124 125
++153 152 153  131 129 131  125 124 125  37 38 37  6 6 6  6 6 6
++6 6 6  4 0 0  6 6 6  6 6 6  4 3 3  5 5 5
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
++6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
++24 26 27  24 26 27  6 6 6  6 6 6  6 6 6  4 0 0
++6 6 6  6 6 6  4 0 0  6 6 6  5 5 5  4 3 3
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
++4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
++6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
++4 0 0  6 6 6  6 6 6  4 3 3  5 5 5  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  5 5 5
++5 5 5  5 5 5  4 0 0  6 6 6  4 0 0  6 6 6
++6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  4 0 0
++6 6 6  4 3 3  5 5 5  4 3 3  5 5 5  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
++4 3 3  6 6 6  4 3 3  6 6 6  6 6 6  6 6 6
++4 0 0  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
++6 6 6  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  4 3 3  5 5 5  4 0 0  6 6 6
++6 6 6  4 0 0  6 6 6  6 6 6  4 0 0  6 6 6
++4 3 3  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  5 5 5  4 3 3  5 5 5  6 6 6  4 3 3
++4 3 3  6 6 6  6 6 6  4 3 3  6 6 6  4 3 3
++5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  6 6 6
++5 5 5  4 3 3  4 3 3  4 3 3  5 5 5  5 5 5
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
++5 5 5  4 3 3  5 5 5  5 5 5  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
++4 4 4  4 4 4
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index d5dbdb9..8159bdd 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1588,7 +1588,7 @@ void xen_irq_resume(void)
+       restore_pirqs();
+ }
+-static struct irq_chip xen_dynamic_chip __read_mostly = {
++static struct irq_chip xen_dynamic_chip = {
+       .name                   = "xen-dyn",
+       .irq_disable            = disable_dynirq,
+@@ -1602,7 +1602,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
+       .irq_retrigger          = retrigger_dynirq,
+ };
+-static struct irq_chip xen_pirq_chip __read_mostly = {
++static struct irq_chip xen_pirq_chip = {
+       .name                   = "xen-pirq",
+       .irq_startup            = startup_pirq,
+@@ -1622,7 +1622,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
+       .irq_retrigger          = retrigger_dynirq,
+ };
+-static struct irq_chip xen_percpu_chip __read_mostly = {
++static struct irq_chip xen_percpu_chip = {
+       .name                   = "xen-percpu",
+       .irq_disable            = disable_dynirq,
+diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
+index 258b7c3..6aad74a 100644
+--- a/drivers/xen/xen-pciback/pci_stub.c
++++ b/drivers/xen/xen-pciback/pci_stub.c
+@@ -831,7 +831,7 @@ end:
+ */
+ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
+-      pci_channel_state_t error)
++      enum pci_channel_state error)
+ {
+       struct pcistub_device *psdev;
+       pci_ers_result_t result;
+diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
+index fef20db..d28b1ab 100644
+--- a/drivers/xen/xenfs/xenstored.c
++++ b/drivers/xen/xenfs/xenstored.c
+@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
+ static int xsd_kva_open(struct inode *inode, struct file *file)
+ {
+       file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                                             NULL);
++#else
+                                              xen_store_interface);
++#endif
++
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
+diff --git a/firmware/Makefile b/firmware/Makefile
+index e297e1b..aeb0982 100644
+--- a/firmware/Makefile
++++ b/firmware/Makefile
+@@ -35,9 +35,11 @@ fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \
+                             bnx2x/bnx2x-e1h-6.2.9.0.fw \
+                             bnx2x/bnx2x-e2-6.2.9.0.fw
+ fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
++                           bnx2/bnx2-mips-09-6.2.1b.fw \
+                            bnx2/bnx2-rv2p-09-6.0.17.fw \
+                            bnx2/bnx2-rv2p-09ax-6.0.17.fw \
+                            bnx2/bnx2-mips-06-6.2.1.fw \
++                           bnx2/bnx2-mips-06-6.2.3.fw \
+                            bnx2/bnx2-rv2p-06-6.0.15.fw
+ fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
+ fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
+diff --git a/firmware/WHENCE b/firmware/WHENCE
+index de6f22e..51fbae7 100644
+--- a/firmware/WHENCE
++++ b/firmware/WHENCE
+@@ -653,21 +653,23 @@ Found in hex form in kernel source.
+ Driver: BNX2 - Broadcom NetXtremeII
+ File: bnx2/bnx2-mips-06-6.2.1.fw
++File: bnx2/bnx2-mips-06-6.2.3.fw
+ File: bnx2/bnx2-rv2p-06-6.0.15.fw
+ File: bnx2/bnx2-mips-09-6.2.1a.fw
++File: bnx2/bnx2-mips-09-6.2.1b.fw
+ File: bnx2/bnx2-rv2p-09-6.0.17.fw
+ File: bnx2/bnx2-rv2p-09ax-6.0.17.fw
+ Licence:
+-
+- This file contains firmware data derived from proprietary unpublished
+- source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
+-
+- Permission is hereby granted for the distribution of this firmware data
+- in hexadecimal or equivalent format, provided this copyright notice is
+- accompanying it.
+-
+-Found in hex form in kernel source.
++ 
++ This file contains firmware data derived from proprietary unpublished 
++ source code, Copyright (c) 2004 - 2010 Broadcom Corporation. 
++ 
++ Permission is hereby granted for the distribution of this firmware data 
++ in hexadecimal or equivalent format, provided this copyright notice is 
++ accompanying it. 
++ 
++Found in hex form in kernel source. 
+ --------------------------------------------------------------------------
+diff --git a/firmware/bnx2/bnx2-mips-06-6.2.3.fw.ihex b/firmware/bnx2/bnx2-mips-06-6.2.3.fw.ihex
+new file mode 100644
+index 0000000..da72bf1
+--- /dev/null
++++ b/firmware/bnx2/bnx2-mips-06-6.2.3.fw.ihex
+@@ -0,0 +1,5804 @@
++:10000000080001180800000000004A68000000C84D
++:1000100000000000000000000000000008004A6826
++:100020000000001400004B30080000A00800000091
++:100030000000569400004B44080058200000008443
++:100040000000A1D808005694000001580000A25CEE
++:100050000800321008000000000072F00000A3B495
++:10006000000000000000000000000000080072F026
++:1000700000000024000116A40800049008000400F9
++:10008000000017D4000116C80000000000000000A6
++:100090000000000000000000000000000000000060
++:1000A000080000A80800000000003BFC00012E9C96
++:1000B0000000000000000000000000000000000040
++:1000C00000000000000000000A00004600000000E0
++:1000D000000000000000000D636F6D362E322E33DD
++:1000E0000000000006020302000000000000000300
++:1000F000000000C800000032000000030000000003
++:1001000000000000000000000000000000000000EF
++:1001100000000010000001360000EA600000000549
++:1001200000000000000000000000000000000008C7
++:1001300000000000000000000000000000000000BF
++:1001400000000000000000000000000000000000AF
++:10015000000000000000000000000000000000009F
++:10016000000000020000000000000000000000008D
++:10017000000000000000000000000000000000007F
++:10018000000000000000000000000010000000005F
++:10019000000000000000000000000000000000005F
++:1001A000000000000000000000000000000000004F
++:1001B000000000000000000000000000000000003F
++:1001C000000000000000000000000000000000002F
++:1001D000000000000000000000000000000000001F
++:1001E0000000000010000003000000000000000DEF
++:1001F0000000000D3C02080024424AA03C03080015
++:1002000024634B9CAC4000000043202B1480FFFD76
++:10021000244200043C1D080037BD7FFC03A0F021F0
++:100220003C100800261001183C1C0800279C4AA01E
++:100230000E000168000000000000000D27470100CB
++:1002400090E3000B2402001A94E5000814620028D1
++:10025000000020218CE200003C0308008C63004475
++:1002600094E60014000211C20002104030A4000203
++:10027000005A10212463000130A50004A446008028
++:100280003C010800AC23004410A000190004202BFE
++:100290008F4202B804410008240400013C02080017
++:1002A0008C420060244200013C010800AC22006046
++:1002B00003E00008008010218CE2002094E3001687
++:1002C00000002021AF4202808CE20004A743028498
++:1002D000AF4202883C021000AF4202B83C02080064
++:1002E0008C42005C244200013C010800AC22005C0E
++:1002F00003E00008008010212747010090E3000B75
++:100300002402000394E50008146200280000202164
++:100310008CE200003C0308008C63004494E6001467
++:10032000000211C20002104030A40002005A102145
++:100330002463000130A50004A44600803C010800AD
++:10034000AC23004410A000190004202B8F4202B8F7
++:1003500004410008240400013C0208008C420060B3
++:10036000244200013C010800AC22006003E00008C8
++:10037000008010218CE2002094E300160000202170
++:10038000AF4202808CE20004A7430284AF4202889D
++:100390003C021000AF4202B83C0208008C42005CF4
++:1003A000244200013C010800AC22005C03E000088C
++:1003B000008010218F4301002402010050620003DD
++:1003C000000311C20000000D000311C20002104022
++:1003D000005A1021A440008003E000080000102112
++:1003E0009362000003E00008AF80000003E0000813
++:1003F0000000102103E00008000010212402010089
++:1004000014820008000000003C0208008C4200FC3E
++:10041000244200013C010800AC2200FC0A0000DD7F
++:1004200030A200203C0208008C42008424420001DB
++:100430003C010800AC22008430A2002010400008DB
++:1004400030A300103C0208008C4201082442000145
++:100450003C010800AC22010803E000080000000095
++:1004600010600008000000003C0208008C420104FB
++:10047000244200013C010800AC22010403E0000812
++:10048000000000003C0208008C42010024420001F0
++:100490003C010800AC22010003E00008000000005D
++:1004A00027BDFFE8AFBF0010274401009483000878
++:1004B000306200041040001B306600028F4202B818
++:1004C00004410008240500013C0208008C42006041
++:1004D000244200013C010800AC2200600A0001290E
++:1004E0008FBF00108C82002094830016000028210A
++:1004F000AF4202808C820004A7430284AF4202888C
++:100500003C021000AF4202B83C0208008C42005C82
++:10051000244200013C010800AC22005C0A000129D1
++:100520008FBF001010C00006006028218F4401001A
++:100530000E0000CD000000000A0001282405000183
++:100540008F8200088F4301045043000700002821D8
++:100550008F4401000E0000CD000000008F42010416
++:10056000AF820008000028218FBF001000A01021DA
++:1005700003E0000827BD001827BDFFE8AFBF001447
++:10058000AFB00010974201083043700024022000F1
++:100590001062000B286220011440002F000010217F
++:1005A00024024000106200250000000024026000C8
++:1005B00010620026000010210A0001658FBF0014A0
++:1005C00027500100920200091040001A2403000184
++:1005D0003C0208008C420020104000160000182148
++:1005E0000E00049300000000960300083C0608007B
++:1005F00094C64B5E8E0400188F8200209605000C76
++:1006000000031C0000661825AC440000AC45000443
++:1006100024040001AC400008AC40000CAC400010C9
++:10062000AC400014AC4000180E0004B8AC43001CF1
++:10063000000018210A000164006010210E0003254B
++:10064000000000000A000164000010210E000EE905
++:1006500000000000000010218FBF00148FB00010B8
++:1006600003E0000827BD001827BDFFE0AFB2001867
++:100670003C036010AFBF001CAFB10014AFB000105E
++:100680008C6450002402FF7F3C1A800000822024EA
++:100690003484380C24020037AC6450003C1208004B
++:1006A00026524AD8AF42000824020C80AF420024F0
++:1006B0003C1B80083C06080024C60324024010218D
++:1006C0002404001D2484FFFFAC4600000481FFFDCC
++:1006D000244200043C020800244204B03C0108000B
++:1006E000AC224AE03C020800244202303C010800EF
++:1006F000AC224AE43C020800244201743C03080096
++:100700002463032C3C040800248403D83C0508001F
++:1007100024A538F03C010800AC224B403C02080004
++:10072000244202EC3C010800AC264B243C010800AA
++:10073000AC254B343C010800AC234B3C3C01080089
++:10074000AC244B443C010800AC224B483C0108005F
++:10075000AC234ADC3C010800AC204AE83C0108001C
++:10076000AC204AEC3C010800AC204AF03C010800F7
++:10077000AC204AF43C010800AC204AF83C010800D7
++:10078000AC204AFC3C010800AC204B003C010800B6
++:10079000AC244B043C010800AC204B083C01080091
++:1007A000AC204B0C3C010800AC204B103C01080075
++:1007B000AC204B143C010800AC204B183C01080055
++:1007C000AC264B1C3C010800AC264B203C01080029
++:1007D000AC254B303C010800AC234B380E000623FF
++:1007E000000000003C028000344200708C42000097
++:1007F000AF8200143C0308008C6300208F82000449
++:10080000104300043C0280000E00045BAF83000430
++:100810003C028000344600703C0308008C6300A05A
++:100820003C0208008C4200A4104300048F84001492
++:100830003C010800AC2300A4A743009E8CCA000022
++:100840003C0308008C6300BC3C0208008C4200B8EA
++:100850000144202300641821000040210064202B63
++:1008600000481021004410213C010800AC2300BCCA
++:100870003C010800AC2200B88F5100003222000772
++:100880001040FFDCAF8A00148CC600003C05080055
++:100890008CA500BC3C0408008C8400B800CA30233E
++:1008A00000A628210000102100A6302B0082202164
++:1008B00000862021322700013C010800AC2500BC45
++:1008C0003C010800AC2400B810E0001F32220002F6
++:1008D0008F420100AF4200208F420104AF4200A8C6
++:1008E0009342010B0E0000C6305000FF2E02001E86
++:1008F00054400004001010800E0000C90A000213CA
++:1009000000000000005210218C4200000040F80955
++:1009100000000000104000053C0240008F4301042D
++:100920003C026020AC4300143C024000AF4201385E
++:100930003C0208008C420034244200013C010800C3
++:10094000AC220034322200021040000E3222000499
++:100950008F4201400E0000C6AF4200200E000295FB
++:10096000000000003C024000AF4201783C02080059
++:100970008C420038244200013C010800AC220038BF
++:10098000322200041040FF983C0280008F42018018
++:100990000E0000C6AF4200208F43018024020F00EA
++:1009A00014620005000000008F420188A742009CED
++:1009B0000A0002483C0240009362000024030050F9
++:1009C000304200FF144300083C0240000E00027B4E
++:1009D00000000000544000043C0240000E000D7571
++:1009E000000000003C024000AF4201B83C02080099
++:1009F0008C42003C244200013C010800AC22003C37
++:100A00000A0001C83C0280003C0290003442000110
++:100A100000822025AF4400208F4200200440FFFECA
++:100A20000000000003E00008000000003C0280001D
++:100A3000344200010082202503E00008AF4400207A
++:100A400027BDFFE0AFB10014AFB0001000808821D7
++:100A5000AFBF00180E00025030B000FF9362007D5F
++:100A60000220202102028025A370007D8F70007477
++:100A70003C0280000E000259020280241600000988
++:100A80008FBF00188F4201F80440FFFE24020002CD
++:100A9000AF5101C0A34201C43C021000AF4201F8B3
++:100AA0008FBF00188FB100148FB0001003E0000852
++:100AB00027BD002027BDFFE8AFBF0010974201848B
++:100AC0008F440188304202001040000500002821B8
++:100AD0000E000FAA000000000A00028D240500018C
++:100AE0003C02FF0004800005008218243C02040040
++:100AF000506200019362003E240500018FBF001088
++:100B000000A0102103E0000827BD0018A360002208
++:100B10008F4401400A00025E2405000127BDFFE862
++:100B2000AFBF0014AFB0001093620000304400FF6C
++:100B300038830020388200300003182B0002102B6D
++:100B40000062182410600003240200501482008008
++:100B50008FBF001493620005304200011040007CFA
++:100B60008FBF0014934201482443FFFF2C6200050D
++:100B7000104000788FB00010000310803C03080084
++:100B800024634A68004310218C42000000400008A2
++:100B9000000000000E0002508F4401408F70000CD6
++:100BA0008F4201441602000224020001AF62000CD1
++:100BB0000E0002598F4401408F420144145000043A
++:100BC0008FBF00148FB000100A000F2027BD00183F
++:100BD0008F62000C0A0003040000000097620010FE
++:100BE0008F4301443042FFFF1462001A00000000EE
++:100BF00024020001A76200108F4202380443001053
++:100C00008F4201403C02003F3446F0003C0560004A
++:100C10003C04FFC08CA22BBC0044182400461024C6
++:100C20000002130200031D82106200390000000060
++:100C30008F4202380440FFF7000000008F4201405D
++:100C4000AF4202003C021000AF4202380A00032209
++:100C50008FBF0014976200100A0003040000000018
++:100C60000E0002508F440140976200128F430144EE
++:100C70003050FFFF1603000224020001A762001299
++:100C80000E0002598F4401408F42014416020004B5
++:100C90008FBF00148FB000100A00029127BD00180A
++:100CA000976200120A00030400000000976200141B
++:100CB0008F4301443042FFFF14620006240200010A
++:100CC0008FBF00148FB00010A76200140A00124AF0
++:100CD00027BD0018976200141440001D8FBF001438
++:100CE0000A00031C00000000976200168F430144B5
++:100CF0003042FFFF1462000B240200018FBF00147A
++:100D00008FB00010A76200160A000B1227BD001852
++:100D10009742007824420004A76200100A000322D0
++:100D20008FBF001497620016240300013042FFFFBA
++:100D3000144300078FBF00143C0208008C4200706F
++:100D4000244200013C010800AC2200708FBF001457
++:100D50008FB0001003E0000827BD001827BDFFE892
++:100D6000AFBF0014AFB000108F50010093620000BD
++:100D700093430109304400FF2402001F106200A5C4
++:100D80002862002010400018240200382862000A5F
++:100D90001040000C2402000B286200081040002CB8
++:100DA00000000000046000E52862000214400028F2
++:100DB00024020006106200268FBF00140A00041FE0
++:100DC0008FB000101062005E2862000B144000DC3F
++:100DD0008FBF00142402000E106200738FB0001049
++:100DE0000A00041F00000000106200C028620039E1
++:100DF0001040000A2402008024020036106200CA5B
++:100E000028620037104000B424020035106200C18F
++:100E10008FBF00140A00041F8FB000101062002B57
++:100E20002862008110400006240200C82402003914
++:100E3000106200B48FBF00140A00041F8FB00010AE
++:100E4000106200998FBF00140A00041F8FB00010B9
++:100E50003C0208008C420020104000B98FBF0014F3
++:100E60000E000493000000008F4201008F830020D9
++:100E70009745010C97460108AC6200008F420104BF
++:100E80003C04080094844B5E00052C00AC62000416
++:100E90008F4201180006340000C43025AC620008FF
++:100EA0008F42011C24040001AC62000C9342010A31
++:100EB00000A22825AC650010AC600014AC600018DE
++:100EC000AC66001C0A0003F58FBF00143C0208004A
++:100ED0008C4200201040009A8FBF00140E00049333
++:100EE00000000000974401083C03080094634B5E37
++:100EF0009745010C000422029746010E8F820020C4
++:100F0000000426000083202500052C003C030080FF
++:100F100000A6282500832025AC400000AC4000043A
++:100F2000AC400008AC40000CAC450010AC400014D4
++:100F3000AC400018AC44001C0A0003F42404000177
++:100F40009742010C14400015000000009362000558
++:100F50003042001014400011000000000E0002504A
++:100F6000020020219362000502002021344200107B
++:100F70000E000259A36200059362000024030020C2
++:100F8000304200FF1043006D020020218FBF00148B
++:100F90008FB000100A000FC027BD00180000000D20
++:100FA0000A00041E8FBF00143C0208008C4200207F
++:100FB000104000638FBF00140E0004930000000077
++:100FC0008F4201048F8300209744010C3C050800E8
++:100FD00094A54B5EAC6200009762002C00042400D4
++:100FE0003042FFFF008220253C02400E00A228254F
++:100FF000AC640004AC600008AC60000CAC60001095
++:10100000AC600014AC600018AC65001C0A0003F46E
++:10101000240400010E00025002002021A7600008F5
++:101020000E00025902002021020020210E00025E63
++:10103000240500013C0208008C42002010400040C2
++:101040008FBF00140E000493000000009742010CB3
++:101050008F8300203C05080094A54B5E000214001D
++:10106000AC700000AC620004AC6000088F64004CFF
++:101070003C02401F00A22825AC64000C8F62005087
++:1010800024040001AC6200108F620054AC620014B2
++:10109000AC600018AC65001C8FBF00148FB000104E
++:1010A0000A0004B827BD0018240200205082002541
++:1010B0008FB000100E000F0A020020211040002007
++:1010C0008FBF0014020020218FB0001000002821E3
++:1010D0000A00025E27BD0018020020218FBF001405
++:1010E0008FB000100A00058027BD00189745010C3D
++:1010F000020020218FBF00148FB000100A0005A04D
++:1011000027BD0018020020218FB000100A0005C57D
++:1011100027BD00189345010D020020218FB000105B
++:101120000A00060F27BD0018020020218FBF0014FF
++:101130008FB000100A0005EB27BD00188FBF001408
++:101140008FB0001003E0000827BD00188F4202781E
++:101150000440FFFE2402000234840080AF440240B9
++:10116000A34202443C02100003E00008AF420278B0
++:101170003C04080094844B6A3C0208008C424B7487
++:101180003083FFFF000318C000431021AF42003C32
++:101190003C0208008C424B70AF4200383C020050C9
++:1011A00034420008AF4200300000000000000000A0
++:1011B000000000008F420000304200201040FFFD80
++:1011C000000000008F4204003C010800AC224B608C
++:1011D0008F4204043C010800AC224B643C02002016
++:1011E000AF420030000000003C02080094424B680F
++:1011F0003C03080094634B6C3C05080094A54B6EBF
++:1012000024840001004310213083FFFF3C010800CB
++:10121000A4224B683C010800A4244B6A1465000317
++:10122000000000003C010800A4204B6A03E0000815
++:10123000000000003C05000A27BDFFE80345282107
++:101240003C04080024844B50AFBF00100E00051D65
++:101250002406000A3C02080094424B523C0308005A
++:1012600094634B6E3042000F244200030043180485
++:1012700024027FFF0043102B10400002AF83001CAC
++:101280000000000D0E00042A000000003C020800CF
++:1012900094424B5A8FBF001027BD001803E000088E
++:1012A000A74200A23C02000A034210219443000618
++:1012B0003C02080094424B5A3C010800A4234B56C0
++:1012C000004310238F83001C00021400000214034B
++:1012D0000043102B03E000083842000127BDFFE85F
++:1012E000AFBF00103C02000A0342102194420006E6
++:1012F0003C010800A4224B560E00047700000000B9
++:101300005440FFF93C02000A8FBF001003E00008C0
++:1013100027BD001827BDFFE8AFBF00100E000477FF
++:101320000000000010400003000000000E000485D3
++:10133000000000003C0208008C424B608FBF001090
++:1013400027430400AF4200383C0208008C424B6443
++:1013500027BD0018AF830020AF42003C3C020005CF
++:10136000AF42003003E00008AF8000188F82001801
++:101370003C0300060002114000431025AF4200303C
++:101380000000000000000000000000008F4200008C
++:10139000304200101040FFFD27420400AF820020C1
++:1013A00003E00008AF8000183C0608008CC64B64C0
++:1013B0008F8500188F8300203C02080094424B5A0E
++:1013C00027BDFFE024A50001246300202442000182
++:1013D00024C70020AFB10014AFB00010AFBF001899
++:1013E000AF850018AF8300203C010800A4224B5AAF
++:1013F000309000FF3C010800AC274B6404C100089A
++:101400000000882104E00006000000003C02080003
++:101410008C424B60244200013C010800AC224B602E
++:101420003C02080094424B5A3C03080094634B680A
++:101430000010202B004310262C42000100441025F0
++:10144000144000048F830018240200101462000F5F
++:10145000000000000E0004A9241100013C03080054
++:1014600094634B5A3C02080094424B681462000398
++:10147000000000000E00042A000000001600000317
++:10148000000000000E000493000000003C03080070
++:1014900094634B5E3C02080094424B5C2463000161
++:1014A0003064FFFF3C010800A4234B5E148200035C
++:1014B000000000003C010800A4204B5E1200000662
++:1014C000000000003C02080094424B5AA74200A2D0
++:1014D0000A00050B022010210E0004770000000016
++:1014E00010400004022010210E00048500000000BE
++:1014F000022010218FBF00188FB100148FB0001090
++:1015000003E0000827BD00203084FFFF30A5FFFF67
++:101510000000182110800007000000003082000148
++:101520001040000200042042006518210A00051343
++:101530000005284003E000080060102110C00006EC
++:1015400024C6FFFF8CA2000024A50004AC8200008A
++:101550000A00051D2484000403E0000800000000C8
++:1015600010A0000824A3FFFFAC86000000000000CC
++:10157000000000002402FFFF2463FFFF1462FFFA53
++:101580002484000403E0000800000000240200019D
++:10159000AF62000CA7620010A7620012A7620014DD
++:1015A00003E00008A76200163082007F034210218A
++:1015B0003C08000E004818213C0208008C42002024
++:1015C00027BDFFD82407FF80AFB3001CAFB20018BF
++:1015D000AFB10014AFB00010AFBF00200080802179
++:1015E00030B100FF0087202430D200FF1040002FD0
++:1015F00000009821AF44002C9062000024030050AA
++:10160000304200FF1443000E000000003C020800BE
++:101610008C4200E00202102100471024AF42002C4F
++:101620003C0208008C4200E0020210213042007FA0
++:101630000342102100481021944200D43053FFFF90
++:101640000E000493000000003C02080094424B5E30
++:101650008F8300200011340000C2302500122C00BE
++:101660003C02400000C2302534A50001AC700000EF
++:101670008FBF0020AC6000048FB20018AC7300086C
++:101680008FB10014AC60000C8FB3001CAC6500106F
++:101690008FB00010AC60001424040001AC6000188E
++:1016A00027BD00280A0004B8AC66001C8FBF0020CC
++:1016B0008FB3001C8FB200188FB100148FB00010D0
++:1016C00003E0000827BD00289343010F2402001007
++:1016D0001062000E2865001110A0000724020012FD
++:1016E000240200082405003A1062000600003021A0
++:1016F00003E0000800000000240500351462FFFC30
++:10170000000030210A000538000000008F420074FC
++:1017100024420FA003E00008AF62000C27BDFFE8E1
++:10172000AFBF00100E00025E240500018FBF001045
++:1017300024020001A762001227BD00182402000144
++:1017400003E00008A360002227BDFFE0AFB1001452
++:10175000AFB00010AFBF001830B1FFFF0E00025055
++:10176000008080219362003F24030004304200FF88
++:101770001443000C02002021122000082402000A59
++:101780000E00053100000000936200052403FFFEF7
++:1017900000431024A362000524020012A362003F4C
++:1017A000020020210E000259A360008116200003D0
++:1017B000020020210E0005950000000002002021FB
++:1017C000322600FF8FBF00188FB100148FB00010B9
++:1017D000240500380A00053827BD002027BDFFE09A
++:1017E000AFBF001CAFB20018AFB10014AFB0001013
++:1017F0000E000250008080210E0005310000000024
++:101800009362003F24120018305100FF123200038F
++:101810000200202124020012A362003F936200050F
++:101820002403FFFE004310240E000259A3620005AA
++:10183000020020212405002016320007000030217C
++:101840008FBF001C8FB200188FB100148FB0001032
++:101850000A00025E27BD00208FBF001C8FB2001857
++:101860008FB100148FB00010240500390A0005382C
++:1018700027BD002027BDFFE8AFB00010AFBF0014A8
++:101880009742010C2405003600808021144000108E
++:10189000304600FF0E00025000000000240200123B
++:1018A000A362003F93620005344200100E00053130
++:1018B000A36200050E00025902002021020020212F
++:1018C0000E00025E240500200A000604000000004D
++:1018D0000E000538000000000E000250020020211A
++:1018E000936200232403FF9F020020210043102461
++:1018F0008FBF00148FB00010A36200230A000259AA
++:1019000027BD001827BDFFE0AFBF0018AFB100141E
++:10191000AFB0001030B100FF0E00025000808021F7
++:10192000240200120E000531A362003F0E0002598E
++:101930000200202102002021022030218FBF001848
++:101940008FB100148FB00010240500350A0005384F
++:1019500027BD0020A380002C03E00008A380002DF9
++:101960008F4202780440FFFE8F820034AF42024073
++:1019700024020002A34202443C02100003E00008DB
++:10198000AF4202783C0360008C6254003042000891
++:101990001440FFFD000000008C625408AF82000C70
++:1019A00024020052AC605408AC645430AC6254342D
++:1019B0002402000803E00008AC6254003C0260000E
++:1019C0008C42540030420008104000053C03600087
++:1019D0008C625400304200081440FFFD00000000FB
++:1019E0008F83000C3C02600003E00008AC43540805
++:1019F00090A3000024020005008040213063003FD6
++:101A000000004821146200050000502190A2001C33
++:101A100094A3001E304900FF306AFFFFAD00000CA8
++:101A2000AD000010AD000024950200148D05001CCF
++:101A30008D0400183042FFFF0049102300021100FE
++:101A4000000237C3004038210086202300A2102B5B
++:101A50000082202300A72823AD05001CAD04001838
++:101A6000A5090014A5090020A50A001603E0000836
++:101A7000A50A00228F4201F80440FFFE2402000262
++:101A8000AF4401C0A34201C43C02100003E00008BF
++:101A9000AF4201F83C0208008C4200B427BDFFE8C9
++:101AA000AFBF001424420001AFB000103C01080099
++:101AB000AC2200B48F4300243C02001F30AA00FF78
++:101AC0003442FF8030D800FF006280240080F8217B
++:101AD00030EF00FF1158003B01405821240CFF80DB
++:101AE0003C19000A3163007F000310C00003194055
++:101AF000006218213C0208008C4200DC25680001CD
++:101B0000310D007F03E21021004310213043007F9C
++:101B100003431821004C102400794821AF420024CF
++:101B20008D220024016C1824006C7026AD22000C5C
++:101B30008D220024310800FFAD22001095220014F0
++:101B4000952300208D27001C3042FFFF3063FFFFEC
++:101B50008D2600180043102300021100000227C345
++:101B60000040282100C4302300E2102B00C23023A3
++:101B700000E53823AD27001CAD2600189522002073
++:101B8000A522001495220022154B000AA52200165A
++:101B90008D2300248D220008254600013145008058
++:101BA0001462000430C4007F108F000238AA008045
++:101BB00000C0502151AF000131C800FF1518FFC906
++:101BC000010058218F8400343082007F03421821A5
++:101BD0003C02000A006218212402FF8000822024B7
++:101BE000AF440024A06A0079A06A00838C62005090
++:101BF0008F840034AC6200708C6500743C027FFFFF
++:101C00003442FFFF00A228240E00066BAC6500746E
++:101C1000AF5000248FBF00148FB0001003E0000805
++:101C200027BD001827BDFFC0AFBE0038AFB70034D6
++:101C3000AFB5002CAFB20020AFB1001CAFB00018A0
++:101C4000AFBF003CAFB60030AFB40028AFB3002444
++:101C50008F4500248F4600288F43002C3C02001F34
++:101C60003442FF800062182400C230240080A82182
++:101C7000AFA3001400A2F0240E00062FAFA60010A0
++:101C80003C0208008C4200E02410FF8003608821A1
++:101C900002A2102100501024AF4200243C02080090
++:101CA0008C4200E002A210213042007F0342182142
++:101CB0003C02000A00629021924200D293630084A9
++:101CC000305700FF306300FF24020001106200342F
++:101CD000036020212402000214620036000000008C
++:101CE0000E001216024028219223008392220083C4
++:101CF0003063007F3042007F000210C000031940B3
++:101D0000006218213C0208008C4200DC02A2102173
++:101D10000043382100F01024AF42002892250078BB
++:101D20009224008330E2007F034218213C02000C21
++:101D300014850007006280212402FFFFA24200F107
++:101D40002402FFFFA64200F20A0007272402FFFF39
++:101D500096020020A24200F196020022A64200F262
++:101D60008E020024AE4200F492220083A24200F0D0
++:101D70008E4200C8AE4200FC8E4200C4AE4200F863
++:101D80008E220050AE4201008E4200CCAE420104D1
++:101D9000922200853042003F0A0007823442004010
++:101DA0000E00123902402821922200850A00078283
++:101DB0003042003F936200852403FFDF3042003F42
++:101DC000A36200859362008500431024A36200850E
++:101DD0009363008393620078307400FF304200FF09
++:101DE00010540036240AFF803C0C000C3283007F24
++:101DF000000310C000031940006218213C020800D3
++:101E00008C4200DC268800013109007F02A21021EB
++:101E10000043382130E2007F0342182100EA1024F9
++:101E2000AF420028006C80218E020024028A182410
++:101E3000006A5826AE02000C8E020024310800FF12
++:101E4000AE02001096020014960300208E07001CBC
++:101E50003042FFFF3063FFFF8E060018004310235F
++:101E600000021100000227C30040282100C43023D3
++:101E700000E2102B00C2302300E53823AE07001C1F
++:101E8000AE06001896020020A60200149602002258
++:101E9000A602001692220079304200FF105400077B
++:101EA0000000000051370001316800FF92220078E5
++:101EB000304200FF1448FFCD0100A0219222008390
++:101EC000A22200798E2200500A0007E2AE220070A2
++:101ED000A22200858E22004C2405FF80AE42010C18
++:101EE0009222008534420020A2220085924200D135
++:101EF0003C0308008C6300DC305400FF3C02080007
++:101F00008C4200E400143140001420C002A31821C8
++:101F100000C4202102A210210064382100461021B3
++:101F20000045182400E52824AF450028AF43002CC5
++:101F30003042007F924400D030E3007F03422821EA
++:101F4000034318213C02000C006280213C02000E79
++:101F5000309600FF00A298211296002A000000008F
++:101F60008E02000C02002021026028211040002572
++:101F7000261000280E00064A000000009262000DA4
++:101F800026830001307400FF3042007FA262000D02
++:101F90002404FF801697FFF0267300203C020800FF
++:101FA0008C4200DC0000A02102A210210044102479
++:101FB000AF4200283C0208008C4200E43C030800C9
++:101FC0008C6300DC02A2102100441024AF42002CDC
++:101FD0003C0208008C4200E402A318213063007F19
++:101FE00002A210213042007F034220210343182126
++:101FF0003C02000C006280213C02000E0A0007A493
++:10200000008298218E4200D8AE2200508E4200D825
++:10201000AE22007092250083924600D19223008365
++:10202000924400D12402FF8000A228243063007F64
++:10203000308400FF00A628250064182A10600002E2
++:1020400030A500FF38A50080A2250083A2250079D5
++:102050000E00063D000000009222007E02A020211A
++:10206000A222007A8E2300743C027FFF3442FFFFDD
++:10207000006218240E00066BAE2300748FA20010BD
++:10208000AF5E00248FBF003CAF4200288FBE0038F7
++:102090008FA200148FB700348FB600308FB5002C9C
++:1020A0008FB400288FB300248FB200208FB1001CA2
++:1020B0008FB0001827BD004003E00008AF42002C9D
++:1020C00090A2000024420001A0A200003C030800EE
++:1020D0008C6300F4304200FF1443000F0080302175
++:1020E000A0A000003C0208008C4200E48F84003471
++:1020F000008220213082007F034218213C02000C24
++:10210000006218212402FF8000822024ACC300005A
++:1021100003E00008AF4400288C8200002442002025
++:1021200003E00008AC82000094C200003C080800F4
++:10213000950800CA30E7FFFF008048210102102106
++:10214000A4C2000094C200003042FFFF00E2102B46
++:1021500054400001A4C7000094A200003C03080002
++:102160008C6300CC24420001A4A2000094A20000D1
++:102170003042FFFF544300078F8600280107102BD1
++:10218000A4A000005440000101003821A4C70000B1
++:102190008F8600288CC4001CAF44003C94A2000031
++:1021A0008F43003C3042FFFF000210C00062182144
++:1021B000AF43003C8F42003C008220231880000483
++:1021C000000000008CC200180A00084324420001ED
++:1021D0008CC20018AF4200383C020050344200105C
++:1021E000AF420030000000000000000000000000CE
++:1021F0008F420000304200201040FFFD0000000030
++:102200008F420404AD2200048F420400AD2200007E
++:102210003C020020AF42003003E000080000000054
++:1022200027BDFFE0AFB20018AFB10014AFB000108F
++:10223000AFBF001C94C2000000C080213C12080007
++:10224000965200C624420001A60200009603000038
++:1022500094E2000000E03021144300058FB100300B
++:102260000E000818024038210A000875000000001E
++:102270008C8300048C820004244200400461000727
++:10228000AC8200048C8200040440000400000000C2
++:102290008C82000024420001AC8200009602000003
++:1022A0003042FFFF50520001A600000096220000BD
++:1022B00024420001A62200008F82002896230000FD
++:1022C00094420016144300048FBF001C2402000136
++:1022D000A62200008FBF001C8FB200188FB100141F
++:1022E0008FB0001003E0000827BD00208F89002870
++:1022F00027BDFFE0AFBF00188D220028274804004B
++:1023000030E700FFAF4200388D22002CAF8800304C
++:10231000AF42003C3C020005AF420030000000002C
++:1023200000000000000000000000000000000000AD
++:10233000000000008C82000C8C82000CAD020000BA
++:102340008C820010AD0200048C820018AD020008DF
++:102350008C82001CAD02000C8CA20014AD02001097
++:102360008C820020AD02001490820005304200FFF4
++:1023700000021200AD0200188CA20018AD02001C71
++:102380008CA2000CAD0200208CA20010AD02002433
++:102390008CA2001CAD0200288CA20020AD02002CF3
++:1023A000AD060030AD000034978300263402FFFFF5
++:1023B00014620002006020213404FFFF10E00011CD
++:1023C000AD04003895230036952400362402000120
++:1023D0003063FFFF000318C20069182190650040B8
++:1023E000308400070082100400451025A0620040E0
++:1023F0008F820028944200563042FFFF0A0008DC1A
++:10240000AD02003C952300369524003624020001DD
++:102410003063FFFF000318C2006918219065004077
++:1024200030840007008210040002102700451024A9
++:10243000A0620040AD00003C000000000000000071
++:10244000000000003C02000634420040AF42003071
++:102450000000000000000000000000008F420000AB
++:10246000304200101040FFFD8F860028AF880030FA
++:1024700024C2005624C7003C24C4002824C50032CE
++:1024800024C600360E000856AFA200108FBF0018F9
++:1024900003E0000827BD00208F8300243C060800CD
++:1024A0008CC600E88F82003430633FFF0003198040
++:1024B00000461021004310212403FF803046007F96
++:1024C00000431024AF420028034618213C02000CB0
++:1024D0000062302190C2000D30A500FF00003821BD
++:1024E00034420010A0C2000D8F8900288F8A00247A
++:1024F00095230036000A13823048000324020001AD
++:10250000A4C3000E1102000B2902000210400005B6
++:10251000240200021100000C240300010A0009201B
++:102520000000182111020006000000000A00092026
++:10253000000018218CC2002C0A000920244300014D
++:102540008CC20014244300018CC200180043102BDD
++:1025500050400009240700012402002714A20003B0
++:10256000000000000A00092C240700019522003E0B
++:1025700024420001A522003E000A138230430003DA
++:102580002C62000210400009008028211460000421
++:102590000000000094C200360A00093C3046FFFFEC
++:1025A0008CC600380A00093C008028210000302138
++:1025B0003C04080024844B780A00088900000000CD
++:1025C000274901008D22000C9523000601202021BF
++:1025D000000216023046003F3063FFFF240200274E
++:1025E00000C0282128C7002810C2000EAF83002495
++:1025F00010E00008240200312402002110C200096A
++:102600002402002510C200079382002D0A00095BF6
++:102610000000000010C200059382002D0A00095B33
++:10262000000000000A0008F4000000000A0006266E
++:102630000000000095230006912400058D25000C64
++:102640008D2600108D2700188D28001C8D29002054
++:10265000244200013C010800A4234B7E3C010800F9
++:10266000A0244B7D3C010800AC254B843C010800B4
++:10267000AC264B883C010800AC274B903C0108007D
++:10268000AC284B943C010800AC294B9803E00008AF
++:10269000A382002D8F87002827BDFFC0AFB3003471
++:1026A000AFB20030AFB1002CAFB00028AFBF0038E0
++:1026B0003C0208008C4200D094E3003030B0FFFFB1
++:1026C000005010073045FFFF3063FFFF00C0982126
++:1026D000A7A200103C110800963100C614A3000602
++:1026E0003092FFFF8CE2002424420030AF42003CD5
++:1026F0000A0009948CE2002094E200323042FFFF8D
++:1027000054A2000827A400188CE2002C24420030B8
++:10271000AF42003C8CE20028AF4200380A0009A218
++:102720008F84002827A5001027A60020022038212A
++:102730000E000818A7A000208FA200182442003025
++:10274000AF4200388FA2001CAF42003C8F840028AB
++:102750003C020005AF42003094820034274304005D
++:102760003042FFFF0202102B14400007AF830030FD
++:1027700094820054948300340202102100431023F9
++:102780000A0009B63043FFFF94830054948200345A
++:102790000223182100501023006218233063FFFF2A
++:1027A000948200163042FFFF144300030000000033
++:1027B0000A0009C424030001948200163042FFFF7E
++:1027C0000043102B104000058F82003094820016C9
++:1027D000006210233043FFFF8F820030AC530000B3
++:1027E000AC400004AC520008AC43000C3C020006B4
++:1027F00034420010AF420030000000000000000032
++:10280000000000008F420000304200101040FFFD29
++:10281000001018C2006418219065004032040007BF
++:10282000240200018FBF00388FB300348FB2003014
++:102830008FB1002C8FB000280082100400451025B5
++:1028400027BD004003E00008A062004027BDFFA8AC
++:10285000AFB60050AFB5004CAFB40048AFB30044C2
++:10286000AFB1003CAFBF0054AFB20040AFB00038D2
++:102870008C9000003C0208008C4200E88F860034F7
++:10288000960300022413FF8000C2302130633FFF13
++:102890000003198000C3382100F3102490B2000017
++:1028A000AF42002C9203000230E2007F034230214D
++:1028B0003C02000E00C28821306300C024020040A8
++:1028C0000080A82100A0B021146200260000A021F1
++:1028D0008E3400388E2200181440000224020001B9
++:1028E000AE2200189202000D304200201440001564
++:1028F0008F8200343C0308008C6300DC001238C077
++:10290000001231400043102100C730210046382119
++:1029100030E300073C02008030E6007800C230253A
++:102920000343182100F31024AF4208002463090078
++:10293000AF4608108E2200188C6300080043102157
++:10294000AE2200188E22002C8E2300182442000193
++:102950000062182B1060003D000000000A000A7899
++:1029600000000000920300022402FFC00043102474
++:10297000304200FF1440000524020001AE2200187E
++:10298000962200360A000A613054FFFF8E2200149E
++:1029900024420001AE22001892020000000216003C
++:1029A0000002160304410029000000009602000204
++:1029B00027A4001000802821A7A20016960200027A
++:1029C00024070001000030213042FFFFAF820024C5
++:1029D0000E000889AFA0001C960300023C0408000A
++:1029E0008C8400E88F82003430633FFF000319803D
++:1029F00000441021004310213043007F3C05000CAF
++:102A00000053102403431821AF4200280065182109
++:102A10009062000D001221403042007FA062000D44
++:102A20003C0308008C6300E48F82003400431021D3
++:102A30000044382130E2007F03421021004510217C
++:102A400000F31824AF430028AEA200009222000D2C
++:102A5000304200101040001302A020218F83002874
++:102A60008EA40000028030219462003E2442FFFFC9
++:102A7000A462003E948400029625000E3084FFFF7D
++:102A80000E00097330A5FFFF8F82002894430034A5
++:102A90009622000E1443000302A02021240200010C
++:102AA000A382002C02C028210E0007FE00000000B7
++:102AB0008FBF00548FB600508FB5004C8FB40048C4
++:102AC0008FB300448FB200408FB1003C8FB000380C
++:102AD00003E0000827BD00588F82002827BDFFD0E3
++:102AE000AFB40028AFB20020AFBF002CAFB30024BA
++:102AF000AFB1001CAFB00018904400D0904300D19B
++:102B00000000A021309200FFA3A30010306300FF5B
++:102B10008C5100D88C5300DC1072002B2402000171
++:102B20003C0308008C6300E493A400108F820034FF
++:102B30002406FF800004214000431021004410219E
++:102B40003043007F00461024AF4200280343182181
++:102B50003C02000C006218218C62000427A40014BF
++:102B600027A50010022280210270102304400015C6
++:102B7000AFA300149062000D00C21024304200FF89
++:102B800014400007020088219062000D344200408A
++:102B90000E0007FEA062000D0A000ABD93A20010FD
++:102BA0000E0009E1241400018F830028AC7000D8C6
++:102BB00093A20010A06200D193A200101452FFD87B
++:102BC0000000000024020001168200048FBF002CC8
++:102BD0000E000626000000008FBF002C8FB40028D6
++:102BE0008FB300248FB200208FB1001C8FB000186B
++:102BF00003E0000827BD003027BDFFD8AFB3001C9D
++:102C0000AFB20018AFB10014AFB00010AFBF0020DA
++:102C10000080982100E0802130B1FFFF0E00049376
++:102C200030D200FF000000000000000000000000A3
++:102C30008F820020AC510000AC520004AC5300085D
++:102C4000AC40000CAC400010AC400014AC4000188C
++:102C50003C03080094634B5E02038025AC50001CCB
++:102C6000000000000000000000000000240400013B
++:102C70008FBF00208FB3001C8FB200188FB10014DB
++:102C80008FB000100A0004B827BD002827BDFFE858
++:102C9000AFB00010AFBF001430A5FFFF30C600FF7B
++:102CA0000080802124020C80AF420024000000003C
++:102CB0000000000000000000000000000000000014
++:102CC0000E000ACC000000003C040800248400E050
++:102CD0008C8200002403FF808FBF001402021021A9
++:102CE00000431024AF4200248C8200003C03000A01
++:102CF000020280213210007F035010218FB000109B
++:102D00000043102127BD001803E00008AF8200280F
++:102D100027BDFFE8AFBF00108F4401403C0308000F
++:102D20008C6300E02402FF80AF840034008318210C
++:102D300000621024AF4200243C02000803424021FC
++:102D4000950500023063007F3C02000A034318210E
++:102D50000062182130A5FFFF3402FFFF0000302180
++:102D60003C07602010A20006AF8300282402FFFF6A
++:102D7000A5020002946500D40E000AF130A5FFFF01
++:102D80008FBF001024020C8027BD001803E000084C
++:102D9000AF4200243C020008034240219502000299
++:102DA0003C0A0800954A00C63046FFFF14C00007E1
++:102DB0003402FFFF8F8200288F8400343C0760209C
++:102DC000944500D40A000B5A30A5FFFF10C200241E
++:102DD0008F87002894E2005494E400163045FFFFEA
++:102DE00000A6102300A6182B3089FFFF10600004F6
++:102DF0003044FFFF00C51023012210233044FFFFA1
++:102E0000008A102B1040000C012A1023240200011C
++:102E1000A50200162402FFFFA502000294E500D4DB
++:102E20008F8400340000302130A5FFFF3C07602074
++:102E30000A000AF1000000000044102A10400008B7
++:102E4000000000009502001630420001104000040E
++:102E5000000000009742007E24420014A5020016E4
++:102E600003E00008000000008F84002827BDFFE079
++:102E7000AFBF0018948200349483003E1060001AA3
++:102E80003048FFFF9383002C2402000114620027C6
++:102E90008FBF00188F820028000818C23108000771
++:102EA000006218212447003A244900542444002099
++:102EB000244500302446003490620040304200FF38
++:102EC0000102100730420001104000168FBF0018A9
++:102ED0000E000856AFA900108F82002894420034DB
++:102EE0000A000B733048FFFF94830036948200344D
++:102EF0001043000E8FBF001894820036A482003465
++:102F000094820056A48200548C82002CAC8200244F
++:102F100094820032A48200309482003CA482003A61
++:102F20008FBF00180A000B3327BD002003E0000804
++:102F300027BD002027BDFFE8AFBF00108F4A01006A
++:102F40003C0508008CA500E03C02080090424B8440
++:102F50003C0C0800958C4B7E01452821304B003FEE
++:102F600030A2007F03424021396900323C02000A4E
++:102F70003963003F2C630001010240212D2900012B
++:102F80002402FF8000A2282401234825AF8A0034B0
++:102F900000801821AF450024000030210080282146
++:102FA00024070001AF8800283C04080024844B78E3
++:102FB000AF8C002415200007A380002D24020020E0
++:102FC0005562000F006020213402FFFF5582000C83
++:102FD000006020212402002015620005000000008E
++:102FE0008C6300142402FFFF106200070000000041
++:102FF0000E000889000000000A000BD0000000004D
++:103000000E0008F4016028210E000B68000000008B
++:103010008FBF001024020C8027BD001803E00008B9
++:10302000AF4200243C0208008C4200E027BDFFA014
++:10303000AFB1003C008210212411FF80AFBE0058C8
++:10304000AFB70054AFB20040AFB00038AFBF005CC4
++:10305000AFB60050AFB5004CAFB40048AFB30044BA
++:10306000005110248F4800248F4900288F470028E2
++:10307000AF4200243C0208008C4200E00080902116
++:1030800024060006008210213042007F03421821EE
++:103090003C02000A006280213C02001F3442FF8093
++:1030A00000E2382427A40010260500F00122F024B5
++:1030B0000102B8240E00051DAFA700308FA2001832
++:1030C000AE0200C48FA2001CAE0200C88FA2002472
++:1030D000AE0200CC93A40010920300D12402FF8022
++:1030E0000082102400431025304900FF3083007F08
++:1030F0003122007F0062102A10400004000310C03B
++:1031000001311026304900FF000310C000031940B0
++:10311000006218213C0208008C4200DC920400D2BC
++:10312000024210210043102100511024AF42002818
++:1031300093A300103063007F000310C00003194008
++:10314000006218213C0208008C4200DC024210217F
++:10315000004310213042007F034218213C02000C42
++:10316000006240218FA300142402FFFF1062003090
++:10317000309500FF93A2001195030014304400FF26
++:103180003063FFFF0064182B1060000D000000008A
++:10319000950400148D07001C8D0600183084FFFF75
++:1031A00000442023000421000000102100E4382105
++:1031B00000E4202B00C230210A000C4A00C4302158
++:1031C000950400148D07001C8D0600183084FFFF45
++:1031D000008220230004210000001021008018211B
++:1031E00000C2302300E4202B00C4302300E3382346
++:1031F000AD07001CAD06001893A20011A502001433
++:1032000097A20012A50200168FA20014AD020010B2
++:103210008FA20014AD02000C93A20011A5020020A1
++:1032200097A20012A50200228FA20014AD02002472
++:103230002406FF80024610243256007FAF4200244D
++:10324000035618213C02000A006280218E02004CC5
++:103250008FA200203124007F000428C0AE0200505D
++:103260008FA200200004214000852821AE020070BA
++:1032700093A2001001208821A202008393A20010D3
++:10328000A2020079920200853042003FA20200852E
++:103290003C0208008C4200DC024210210045102153
++:1032A00000461024AF42002C3C0208008C4200E48F
++:1032B0003C0308008C6300DC024210210044102112
++:1032C00000461024AF4200283C0208008C4200E473
++:1032D00002431821006518210242102100441021E8
++:1032E0003042007F3063007F93A50010034220210D
++:1032F000034318213C02000E006240213C02000CF6
++:1033000010B1008C008248213233007F1660001912
++:103310002404FF803C0208008C4200DC02421021A1
++:1033200000441024AF42002C3C0208008C4200E410
++:103330003C0308008C6300DC02421021004410248E
++:10334000AF4200283C0208008C4200E402431821EE
++:103350003063007F024210213042007F034220216F
++:10336000034318213C02000E006240213C02000C85
++:10337000008248219124000D2414FF8000001021B8
++:1033800000942025A124000D950400029505001449
++:103390008D07001C3084FFFF30A5FFFF8D0600184D
++:1033A000008520230004210000E4382100C23021E0
++:1033B00000E4202B00C43021AD07001CAD0600182E
++:1033C00095020002A5020014A50000168D02000857
++:1033D000AD0200108D020008AD02000C9502000243
++:1033E000A5020020A50000228D020008AD020024E5
++:1033F0009122000D30420040104000422622000180
++:103400003C0208008C4200E0A3B300283C10000AF4
++:103410000242102100541024AF4200243C02080054
++:103420008C4200E0A380002C27A4002C0242102133
++:103430003042007F03421821007018218C6200D8AE
++:103440008D26000427A50028AFA9002C00461021D6
++:10345000AC6200D80E0009E1AF83002893A30028D6
++:103460008F8200280E000626A04300D10E000B68B4
++:103470000000000002541024AF4200243C02080067
++:103480008C4200DC00132940001320C000A420213E
++:10349000024210210044102100541024AF42002C9D
++:1034A0003C0208008C4200E43C0308008C6300DC12
++:1034B00003563021024210210045102100541024EF
++:1034C000AF4200283C0208008C4200E4024318216D
++:1034D0000064182102421021004510213042007F73
++:1034E0003063007F03422021034318213C02000E79
++:1034F000006240213C02000C00D080210082482163
++:10350000262200013043007F14750005304400FF7F
++:103510002403FF800223102400431026304400FFC0
++:1035200093A2001000808821250800281444FF760B
++:103530002529002093A400108FA300142402FFFF6C
++:103540001062000A308900FF2482000124830001F8
++:103550003042007F14550005306900FF2403FF80CE
++:103560000083102400431026304900FF92020078A7
++:10357000305300FF11330032012088213C02080043
++:103580008C4200DC3225007F000520C00005294068
++:1035900000A42021024210212406FF8000441021B3
++:1035A00000461024AF42002C3C0308008C6300DC72
++:1035B0003C0208008C4200E4024318210242102120
++:1035C0000045102100641821004610243063007F5C
++:1035D000AF420028034318213C02000E0062402144
++:1035E0003C0208008C4200E48D06000C0100202102
++:1035F00002421021004510213042007F0342182171
++:103600003C02000C0062482110C0000D012028215E
++:103610000E00064A000000002402FF800222182447
++:1036200026240001006228263082007F1455000203
++:10363000308300FF30A300FF1473FFD000608821A7
++:103640008E0300743C027FFF3442FFFF00621824A7
++:10365000AE0300740E00066B02402021AF57002419
++:103660008FA20030AF5E00288FBF005C8FBE005875
++:103670008FB700548FB600508FB5004C8FB4004800
++:103680008FB300448FB200408FB1003C8FB0003840
++:1036900027BD006003E00008AF42002C27BDFFD823
++:1036A000AFB1001CAFBF0020AFB000182751018898
++:1036B000922200032408FF803C03000A3047007F69
++:1036C000A3A700108F4601803C0208008C4200E056
++:1036D000AF86003400C2282100A81024AF42002485
++:1036E0009224000030A2007F0342102100431021E9
++:1036F000AF8200283084007F24020002148200255B
++:10370000000719403C0208008C4200E400C210216E
++:103710000043282130A2007F0342182100A8102472
++:10372000AF4200283C02000C006218219062000D9C
++:10373000AFA3001400481025A062000D8FA3001451
++:103740009062000D304200405040006A8FBF002060
++:103750008F860028A380002C27A400148CC200D8D8
++:103760008C63000427A50010004310210E0009E11E
++:10377000ACC200D893A300108F8200280E0006264A
++:10378000A04300D10E000B68000000000A000E0BE1
++:103790008FBF00200E00062F00C020210E00063D26
++:1037A000000000003C020008034280219223000137
++:1037B0009202007B1443004F8FBF00209222000032
++:1037C0003044007F24020004108200172882000584
++:1037D00010400006240200052402000310820007A6
++:1037E0008FB1001C0A000E0C0000000010820012B5
++:1037F0008FBF00200A000E0C8FB1001C92050083C1
++:10380000920600788E0700748F84003430A500FF84
++:1038100000073E0230C600FF0E00067330E7007F4F
++:103820000A000E0B8FBF00200E000BD78F840034D0
++:103830000A000E0B8FBF002024020C80AF42002430
++:103840009202003E30420040104000200000000084
++:103850009202003E00021600000216030441000618
++:10386000000000008F8400340E0005A024050093A2
++:103870000A000E0B8FBF00209202003F24030018A5
++:10388000304200FF1443000C8F84003424050039BB
++:103890000E000538000030210E0002508F840034E5
++:1038A00024020012A202003F0E0002598F8400344D
++:1038B0000A000E0B8FBF0020240500360E000538CD
++:1038C000000030210A000E0B8FBF00200E000250B6
++:1038D0008F8400349202000534420020A2020005C9
++:1038E0000E0002598F8400340E000FC08F84003404
++:1038F0008FBF00208FB1001C8FB0001824020C80F5
++:1039000027BD002803E00008AF42002427BDFFE8E0
++:10391000AFB00010AFBF001427430100946200084D
++:103920000002140000021403044100020000802180
++:103930002410000194620008304200801040001AF8
++:10394000020010219462000830422000104000164E
++:10395000020010218C6300183C021C2D344219ED2A
++:10396000240600061062000F3C0760213C0208009C
++:103970008C4200D4104000078F8200288F830028DB
++:10398000906200623042000F34420040A062006248
++:103990008F8200288F840034944500D40E000AF1F1
++:1039A00030A5FFFF020010218FBF00148FB0001060
++:1039B00003E0000827BD001827BDFFE0AFB10014E9
++:1039C000AFB00010A380002CAFBF00188F450100DE
++:1039D0003C0308008C6300E02402FF80AF850034C4
++:1039E00000A318213064007F0344202100621824C2
++:1039F0003C02000A00822021AF430024275001002E
++:103A00008E0200148C8300DCAF8400280043102356
++:103A100018400004000088218E0200140E000A8461
++:103A2000AC8200DC9202000B24030002304200FF53
++:103A30001443002F0000000096020008304300FFEE
++:103A40002402008214620005240200840E00093E54
++:103A5000000000000A000E97000000001462000938
++:103A6000240200818F8200288F8400343C0760216B
++:103A7000944500D49206000530A5FFFF0A000E868B
++:103A800030C600FF14620027000000009202000A06
++:103A9000304300FF306200201040000430620040DC
++:103AA0008F8400340A000E82240600401040000477
++:103AB000000316008F8400340A000E8224060041A1
++:103AC00000021603044100178F84003424060042CC
++:103AD0008F8200283C076019944500D430A5FFFF71
++:103AE0000E000AF1000000000A000E97000000001E
++:103AF0009202000B24030016304200FF1043000620
++:103B0000000000009202000B24030017304200FF67
++:103B100014430004000000000E000E11000000001D
++:103B2000004088210E000B68000000009202000A8D
++:103B3000304200081040000624020C808F850028C7
++:103B40003C0400080E0011EE0344202124020C80E6
++:103B5000AF4200248FBF0018022010218FB0001048
++:103B60008FB1001403E0000827BD002027BDFFE847
++:103B7000AFBF0014AFB000108F5000243C0308000A
++:103B80008C6300E08F4501002402FF8000A3182110
++:103B90003064007F03442021006218243C02000AA4
++:103BA00000822021AF850034AF4300249082006260
++:103BB000AF8400283042000F34420050A0820062DF
++:103BC0003C02001F3442FF800E00062602028024C1
++:103BD000AF5000248FBF00148FB0001003E0000826
++:103BE00027BD00183C0208008C4200201040001D38
++:103BF0002745010090A300093C0200080342202150
++:103C000024020018546200033C0200080A000ED887
++:103C10002402000803422021240200161462000539
++:103C20002402001724020012A082003F0A000EE2C4
++:103C300094A700085462000694A700089362000548
++:103C40002403FFFE00431024A362000594A700088C
++:103C500090A6001B8CA4000094A500060A000ACCC4
++:103C600000073C0003E000080000000027440100BA
++:103C700094820008304500FF38A3008238A20084F7
++:103C80002C6300012C420001006218251060000620
++:103C9000240200839382002D1040000D00000000DC
++:103CA0000A000B9B0000000014A2000524A2FF8064
++:103CB0008F4301043C02602003E00008AC43001481
++:103CC000304200FF2C420002104000032402002278
++:103CD0000A000E3C0000000014A2000300000000D7
++:103CE0000A000EA9000000000A000EC70000000034
++:103CF0009363007E9362007A144300090000202140
++:103D00009362000024030050304200FF144300047B
++:103D1000240400019362007E24420001A362007E1D
++:103D200003E00008008010218F4201F80440FFFEEC
++:103D300024020002AF4401C0A34201C43C021000AF
++:103D400003E00008AF4201F827BDFFE8AFBF001055
++:103D50009362003F2403000A304200FF14430046F0
++:103D6000000000008F6300548F62004C1062007DE1
++:103D7000036030219362000024030050304200FFB2
++:103D80001443002F000000008F4401403C02080053
++:103D90008C4200E02403FF800082102100431024A5
++:103DA000AF4200243C0208008C4200E08F650054C2
++:103DB0003C03000A008220213084007F034410214C
++:103DC00000431021AC4501089762003C8F63004C12
++:103DD0003042FFFF0002104000621821AF63005C18
++:103DE0008F6300548F64004C9762003C006418237A
++:103DF0003042FFFF00031843000210400043102A26
++:103E000010400006000000008F6200548F63004CD9
++:103E1000004310230A000F58000210439762003C31
++:103E20003042FFFF00021040ACC2006424020001D7
++:103E3000A0C0007CA0C2008424020C80AF420024F9
++:103E40000E000F0A8F440140104000478FBF001042
++:103E50008F4301408F4201F80440FFFE240200021C
++:103E6000AF4301C0A34201C43C021000AF4201F8BD
++:103E70000A000FA88FBF00109362003F24030010B8
++:103E8000304200FF14430004000000008F44014052
++:103E90000A000F94000028219362003F24030016BB
++:103EA000304200FF1443000424020014A362003FC8
++:103EB0000A000FA2000000008F62004C8F630050C8
++:103EC00000431023044100288FBF0010936200813B
++:103ED00024420001A3620081936200812C4200040D
++:103EE00014400010000000009362003F240300040F
++:103EF000304200FF14430006000000008F440140E0
++:103F00008FBF0010240500930A0005A027BD0018EC
++:103F10008F440140240500938FBF00100A00060F54
++:103F200027BD00188F4401400E0002500000000021
++:103F30008F6200542442FFFFAF6200548F62005032
++:103F40002442FFFFAF6200500E0002598F4401402F
++:103F50008F4401408FBF0010240500040A00025E58
++:103F600027BD00188FBF001003E0000827BD001810
++:103F70008F4201889363007E00021402304400FFE8
++:103F8000306300FF1464000D0000000093620080A5
++:103F9000304200FF1044000900000000A3640080CC
++:103FA0009362000024030050304200FF14430004D9
++:103FB000000000000A0006D78F440180A36400803F
++:103FC00003E000080000000027BDFFE8AFB00010CC
++:103FD000AFBF00149362000524030030304200306C
++:103FE00014430089008080213C0208008C4200209C
++:103FF00010400080020020210E0004930000000009
++:104000008F850020ACB000009362003E9363003FB8
++:10401000304200FF00021200306300FF0043102511
++:10402000ACA2000493620082000216000002160394
++:1040300004410005000000003C0308008C630048B8
++:104040000A000FE6000000009362003E304200408C
++:10405000144000030000182193620081304300FFE8
++:104060009362008200031E00304200FF0002140031
++:1040700000621825ACA300088F620040ACA2000CBF
++:104080008F620048ACA200108F62004CACA20014FA
++:104090008F6200508F63004C0043102304410003E3
++:1040A000000000000A000FFA8F62004C8F6200507F
++:1040B000ACA200183C02080094424B5E3C03C00BCB
++:1040C00000002021004310250E0004B8ACA2001C03
++:1040D0008F6200548F840020AC8200008F620058F1
++:1040E000AC8200048F62005CAC8200088F620060CA
++:1040F0008F43007400431021AC82000C8F62006477
++:10410000AC820010976300689762006A00031C008D
++:104110003042FFFF00621825AC83001493620082D6
++:1041200024030080304200FF14430003000000001D
++:104130000A00102EAC8000188F63000C24020001CE
++:104140001062000E2402FFFF9362003E30420040E6
++:104150001440000A2402FFFF8F63000C8F4200749A
++:10416000006218233C020800006210241440000280
++:10417000000028210060282100051043AC820018AF
++:104180003C02080094424B5E3C03C00C000020211E
++:10419000004310258F8300200E0004B8AC62001C81
++:1041A0008F6200188F8300203C05080094A54B5EA9
++:1041B00024040001AC620000AC6000048F66006C57
++:1041C0003C02400D00A22825AC6600088F6200DC8E
++:1041D000AC62000CAC600010936200050002160097
++:1041E000AC620014AC6000180E0004B8AC65001C92
++:1041F000020020218FBF00148FB00010A3600005C3
++:104200000A00042127BD00188FBF00148FB00010D2
++:1042100003E0000827BD00189742007C30C600FF6D
++:10422000A08600843047FFFF2402000514C2000B63
++:1042300024E3465090A201122C42000710400007D0
++:1042400024E30A0090A30112240200140062100467
++:1042500000E210210A0010663047FFFF3067FFFFC1
++:1042600003E00008A4870014AC87004C8CA201086E
++:104270000080402100A0482100E2102330C600FF4A
++:104280001840000393AA001324E2FFFCACA201082B
++:1042900030C2000110400008000000008D020050F4
++:1042A00000E2102304410013240600058D0200548F
++:1042B00010E20010000000008D02005414E2001A09
++:1042C000000000003C0208008C4200D83042002070
++:1042D0001040000A2402000191030078910200833B
++:1042E000144300062402000101002021012028219E
++:1042F000240600040A00105400000000A1000084FD
++:1043000011400009A50200148F4301008F4201F8FB
++:104310000440FFFE24020002AF4301C0A34201C4D7
++:104320003C021000AF4201F803E00008000000006A
++:1043300027BDFFE88FA90028AFBF001000804021F3
++:1043400000E918231860007330C600FFA080007CCD
++:10435000A08000818CA2010800E210230440004DDF
++:10436000000000008C8200509483003C8C84006428
++:10437000004748233063FFFF012318210083202BCF
++:1043800010800004000000008D0200640A0010B7D5
++:1043900000E210219502003C3042FFFF0122102173
++:1043A00000E21021AD02005C9502003C8D03005C30
++:1043B0003042FFFF0002104000E210210043102BAA
++:1043C00010400003000000000A0010C68D02005CCF
++:1043D0009502003C3042FFFF0002104000E2102135
++:1043E000AD02005CA1000084AD07004C8CA2010866
++:1043F00000E210231840000224E2FFFCACA20108F6
++:1044000030C200011040000A000000008D02005080
++:1044100000E2102304410004010020218D02005419
++:1044200014E20003000000000A0010E82406000562
++:104430008D02005414E200478FBF00103C020800B8
++:104440008C4200D8304200201040000A24020001B3
++:1044500091030078910200831443000624020001B6
++:1044600001002021240600048FBF00100A00105410
++:1044700027BD0018A1000084A50200148F4301008D
++:104480008F4201F80440FFFE240200020A00110DD1
++:10449000000000008C82005C004910230043102BB8
++:1044A00054400001AC87005C9502003C3042FFFFA5
++:1044B0000062102B14400007240200029502003C09
++:1044C0008D03005C3042FFFF00621821AD03005CE9
++:1044D00024020002AD07004CA10200840E000F0A66
++:1044E0008F4401001040001B8FBF00108F4301005C
++:1044F0008F4201F80440FFFE24020002AF4301C0D6
++:10450000A34201C43C021000AF4201F80A0011238B
++:104510008FBF001030C200101040000E8FBF00107F
++:104520008C83005C9482003C006918233042FFFFBA
++:10453000006218213C023FFF3444FFFF0083102B30
++:10454000544000010080182101231021AD02005CBD
++:104550008FBF001003E0000827BD001827BDFFE84B
++:104560008FAA0028AFBF00100080402100EA482336
++:104570001920002130C600FF8C83005C8C8200640F
++:10458000006A18230043102B5040001000691821C6
++:1045900094A2011001221021A4A2011094A20110E2
++:1045A0003042FFFF0043102B1440000A3C023FFF43
++:1045B00094A2011000431023A4A201109482003C95
++:1045C0003042FFFF0A00114200621821A4A001102E
++:1045D0003C023FFF3444FFFF0083102B5440000196
++:1045E0000080182100671021AD02005CA100007C52
++:1045F0000A00118AA100008130C200101040003C66
++:10460000000000008C820050004A1023184000383F
++:10461000000000009082007C24420001A082007C07
++:104620009082007C3C0308008C630024304200FF31
++:104630000043102B1440005C8FBF00108CA20108B7
++:1046400000E2102318400058000000008C83005442
++:104650009482003C006A18233042FFFF0003184395
++:10466000000210400043102A104000050000000026
++:104670008C820054004A10230A001171000210437A
++:104680009482003C3042FFFF00021040AD02006403
++:104690009502003C8D0400649503003C3042FFFF0E
++:1046A00000021040008220213063FFFF00831821A8
++:1046B00001431021AD02005C8D020054ACA2010840
++:1046C00024020002A10200840E000F0A8F440100A0
++:1046D000104000358FBF00108F4301008F4201F85A
++:1046E0000440FFFE240200020A0011B30000000093
++:1046F000AD07004C8CA2010800E210231840000214
++:1047000024E2FFFCACA2010830C200011040000A04
++:10471000000000008D02005000E21023044100045C
++:10472000010020218D02005414E20003000000006B
++:104730000A0011AA240600058D02005414E2001A92
++:104740008FBF00103C0208008C4200D8304200208D
++:104750001040000A240200019103007891020083B6
++:104760001443000624020001010020212406000455
++:104770008FBF00100A00105427BD0018A10000844C
++:10478000A50200148F4301008F4201F80440FFFE90
++:1047900024020002AF4301C0A34201C43C02100046
++:1047A000AF4201F88FBF001003E0000827BD0018DA
++:1047B0008FAA00108C8200500080402130C600FF7C
++:1047C000004A102300A048211840000700E01821EB
++:1047D00024020001A0800084A0A00112A482001481
++:1047E0000A001125AFAA0010A0800081AD07004C7F
++:1047F0008CA2010800E210231840000224E2FFFC12
++:10480000ACA2010830C20001104000080000000006
++:104810008D0200500062102304410013240600059D
++:104820008D02005410620010000000008D02005440
++:1048300014620011000000003C0208008C4200D805
++:10484000304200201040000A240200019103007849
++:10485000910200831443000624020001010020217C
++:1048600001202821240600040A0010540000000042
++:10487000A1000084A502001403E00008000000006D
++:1048800027BDFFE0AFBF0018274201009046000A95
++:104890008C4800148C8B004C9082008430C900FF3F
++:1048A00001681823304A00FF1C60001A2D460006DC
++:1048B000240200010142100410C00016304300031E
++:1048C000012030210100382114600007304C000C19
++:1048D00015800009304200301440000B8FBF0018D3
++:1048E0000A001214000000000E001125AFAB0010EA
++:1048F0000A0012148FBF00180E00109AAFAB001000
++:104900000A0012148FBF0018AFAB00100E0011BACE
++:10491000AFAA00148FBF001803E0000827BD0020D5
++:1049200024020003A08200848C82005403E000086B
++:10493000ACA201083C0200080342182190620081E9
++:10494000240600433C07601924420001A062008154
++:10495000906300813C0208008C4200C0306300FF7D
++:10496000146200102403FF803C0208008C4200E027
++:104970000082102100431024AF4200243C020800B2
++:104980008C4200E03C03000A008210213042007F8C
++:104990000342102100431021944500D40A000AF17B
++:1049A00030A5FFFF03E000080000000027BDFFE086
++:1049B000AFBF0018AFB10014AFB000108F4201803C
++:1049C0000080802100A088210E00121B00402021C1
++:1049D000A20000848E0200548FBF00188FB0001018
++:1049E000AE2201088FB1001403E0000827BD0020AB
++:1049F00027BDFFE03C020008AFB00010AFBF0018B9
++:104A0000AFB10014034280218F5101409203008412
++:104A10008E0400508E02004C14820040306600FF6D
++:104A20003C0208008C4200E02403FF800222102197
++:104A300000431024AF4200243C0208008C4200E0F6
++:104A40009744007C92050081022210213042007FB1
++:104A5000034218213C02000A0062182114A0000B36
++:104A60003084FFFF2402000554C20014248205DCB8
++:104A70009062011224420001A062011224020C8003
++:104A8000AF4200240A00127324020005A060011244
++:104A90002402000514C20009248205DC9202008170
++:104AA0002C4200075040000524820A009203008136
++:104AB0002402001400621004008210213044FFFF21
++:104AC000A60400140E00121B022020219602003CB6
++:104AD0008E03004C022020213042FFFF00021040D4
++:104AE000006218210E000250AE03005C9202007DAD
++:104AF00002202021344200400E000259A202007D13
++:104B00008F4201F80440FFFE24020002AF5101C0B1
++:104B1000A34201C43C021000AF4201F88FBF00184D
++:104B20008FB100148FB0001003E0000827BD0020F3
++:104B300008000ACC08000B1408000B9808000BE4CE
++:104B400008000C200A0000280000000000000000FF
++:104B50000000000D6370362E322E3300000000007E
++:104B60000602030400000000000000000000000036
++:104B70000000000000000000000000000000000035
++:104B80000000000000000000000000000000002005
++:104B90000000000000000000000000000000000015
++:104BA0000000000000000000000000000000000005
++:104BB00000000000000000000000000000000001F4
++:104BC0000000002B000000000000000400030D4066
++:104BD00000000000000000000000000000000000D5
++:104BE00000000000000000001000000300000000B2
++:104BF0000000000D0000000D3C020800244258A4F3
++:104C00003C03080024635F70AC4000000043202B8D
++:104C10001480FFFD244200043C1D080037BD7FFCCA
++:104C200003A0F0213C100800261000A03C1C080046
++:104C3000279C58A40E0001AC000000000000000DED
++:104C400027BDFFE83C096018AFBF00108D2C500055
++:104C5000240DFF7F24080031018D5824356A380C5B
++:104C600024070C003C1A8000AD2A50003C04800A46
++:104C7000AF4800083C1B8008AF4700240E00091510
++:104C8000AF8400100E0008D8000000000E000825B8
++:104C9000000000000E001252000000003C046016EC
++:104CA0008C8500003C06FFFF3C02535300A61824ED
++:104CB0001062004734867C0094C201F2A780002C69
++:104CC00010400003A78000CC38581E1EA798002C67
++:104CD00094C201F810400004978300CC38591E1E7E
++:104CE000A79900CC978300CC2C7F006753E000018C
++:104CF000240300669784002C2C82040114400002D7
++:104D000000602821240404003C0760008CE904387A
++:104D10002403103C3128FFFF1103001F30B9FFFFAF
++:104D200057200010A38000CE24020050A38200CEA2
++:104D3000939F00CE53E0000FA78500CCA78000CC46
++:104D4000978500CC8FBF0010A780002CA78000346F
++:104D5000A78000E63C010800AC25008003E00008C5
++:104D600027BD0018939F00CE57E0FFF5A78000CC29
++:104D7000A78500CC978500CC8FBF0010A784002C9E
++:104D8000A7800034A78000E63C010800AC25008025
++:104D900003E0000827BD0018A38000CE8CCB003CA8
++:104DA000316A00011140000E0000000030A7FFFF33
++:104DB00010E0FFDE240200508CCC00C831860001D8
++:104DC00014C0FFDC939F00CE0A00007A2402005139
++:104DD0008C8F00043C0E60000A00005D01EE302163
++:104DE0008CEF0808240D5708000F740211CD000441
++:104DF00030B8FFFF240500660A00007B240404008D
++:104E00001700FFCC939F00CE0A00007A24020050C6
++:104E10008F8600103089FFFF000939408CC30010D5
++:104E20003C08005000E82025AF4300388CC5001432
++:104E300027420400AF82001CAF45003CAF44003065
++:104E40000000000000000000000000000000000062
++:104E50000000000000000000000000000000000052
++:104E60008F4B0000316A00201140FFFD0000000060
++:104E700003E00008000000008F840010948A001AEC
++:104E80008C8700243149FFFF000940C000E8302131
++:104E9000AF46003C8C8500248F43003C00A31023C8
++:104EA00018400029000000008C8B002025620001C2
++:104EB0003C0D005035AC0008AF420038AF4C00301C
++:104EC00000000000000000000000000000000000E2
++:104ED00000000000000000000000000000000000D2
++:104EE0008F4F000031EE002011C0FFFD00000000D8
++:104EF0008F4A04003C080020AC8A00108F4904044B
++:104F0000AC890014AF4800300000000094860018FF
++:104F10009487001C00C71821A48300189485001AE8
++:104F200024A20001A482001A9498001A9499001EE9
++:104F3000133800030000000003E000080000000038
++:104F400003E00008A480001A8C8200200A0000DC24
++:104F50003C0D00500A0000CD000000003C0308009A
++:104F60008C6300208F82001827BDFFE810620008C4
++:104F7000AFBF00100E000104AF8300183C0308000F
++:104F80008C63002024040001106400048F89001049
++:104F90008FBF001003E0000827BD00188FBF00106E
++:104FA0003C076012A520000A9528000A34E500108D
++:104FB00027BD00183106FFFF03E00008ACA60090F3
++:104FC0003C0208008C42002027BDFFC8AFBF003460
++:104FD000AFBE0030AFB7002CAFB60028AFB500248D
++:104FE000AFB40020AFB3001CAFB20018AFB10014D3
++:104FF00010400050AFB000108F840010948600065F
++:105000009483000A00C3282330B6FFFF12C0004A71
++:105010008FBF003494890018948A000A012A402323
++:105020003102FFFF02C2382B14E0000202C020212F
++:10503000004020212C8C0005158000020080A0215A
++:10504000241400040E0000B3028020218F8700107A
++:1050500002809821AF80001494ED000A028088211C
++:105060001280004E31B2FFFF3C1770003C1540002B
++:105070003C1E60008F8F001C8DEE000001D71824AD
++:10508000507500500220202102A3802B160000350D
++:105090003C182000507800470220202124100001F5
++:1050A0008F83001414600039029158230230F823D2
++:1050B0000250C82133F1FFFF1620FFEE3332FFFF0D
++:1050C0008F8700103C110020AF510030000000001D
++:1050D00094E6000A3C1E601237D5001002662821B3
++:1050E000A4E5000A94E2000A94F2000A94F400187D
++:1050F0003057FFFF1292003BAEB700908CED0014CA
++:105100008CE400100013714001AE4021000E5FC31B
++:10511000010E502B008B4821012A1821ACE8001405
++:10512000ACE3001002D3382330F6FFFF16C0FFB9FE
++:105130008F8400108FBF00348FBE00308FB7002CDB
++:105140008FB600288FB500248FB400208FB3001CC9
++:105150008FB200188FB100148FB0001003E0000868
++:1051600027BD0038107E001B000000001477FFCC24
++:10517000241000010E00159B000000008F83001416
++:105180001060FFCB0230F823029158238F87001064
++:10519000017020210A0001973093FFFF8F830014D4
++:1051A0001460FFCB3C110020AF5100300A000163B6
++:1051B000000000000E00077D024028210A00015770
++:1051C000004080210E00033A024028210A000157C6
++:1051D000004080210E001463022020210A000157A4
++:1051E000004080210E0000CD000000000A0001797F
++:1051F00002D3382327BDFFE8AFB00010AFBF0014C3
++:105200000E00003F000000003C028000345000709F
++:105210000A0001BA8E0600008F4F000039EE00012F
++:1052200031C20001104000248F8600A88E070000C4
++:105230003C0C08008D8C003C3C0908008D2900388E
++:1052400000E66823018D28210000502100AD302B9D
++:10525000012A4021010620213C010800AC25003C28
++:10526000AF8700A83C010800AC2400380E000106FE
++:10527000000000003C0308008C6300701060FFE633
++:10528000006020213C0508008CA500683C06080051
++:105290008CC6006C0E00152A000000003C010800BE
++:1052A000AC2000708F4F000039EE000131C20001C8
++:1052B0001440FFDE8F8600A88E0A00008F8B00A8A6
++:1052C0003C0508008CA5003C3C0408008C84003898
++:1052D000014B482300A938210082182100E9402B06
++:1052E000006810213C010800AC27003C3C0108008C
++:1052F000AC2200388F5F01002419FF0024180C0035
++:1053000003F9202410980012AF840000AF4400205D
++:10531000936D0000240C002031A600FF10CC001279
++:10532000240E005010CE00043C194000AF59013843
++:105330000A0001B3000000000E0011C800000000C8
++:105340003C194000AF5901380A0001B300000000C9
++:105350000E00011F000000003C194000AF59013849
++:105360000A0001B3000000008F58010000802821CE
++:10537000330F00FF01E020210E0002F1AF8F000487
++:105380003C194000AF5901380A0001B30000000089
++:1053900000A4102B2403000110400009000030215C
++:1053A0000005284000A4102B04A0000300031840AF
++:1053B0005440FFFC000528405060000A0004182BF0
++:1053C0000085382B54E000040003184200C3302548
++:1053D00000852023000318421460FFF900052842CD
++:1053E0000004182B03E0000800C310218F4201B80D
++:1053F0000440FFFE00000000AF4401803C031000A9
++:1054000024040040AF450184A3440188A3460189D8
++:10541000A747018A03E00008AF4301B83084FFFFCB
++:105420000080382130A5FFFF000020210A00022A59
++:10543000240600803087FFFF8CA40000240600387B
++:105440000A00022A000028218F8300388F8600304E
++:105450001066000B008040213C07080024E75A1822
++:10546000000328C000A710218C4400002463000121
++:10547000108800053063000F5466FFFA000328C04F
++:1054800003E00008000010213C07080024E75A1C34
++:1054900000A7302103E000088CC200003C0390000C
++:1054A0003462000100822025AF4400208F45002097
++:1054B00004A0FFFE0000000003E000080000000060
++:1054C0003C038000346200010082202503E00008D4
++:1054D000AF44002027BDFFE0AFB100143091FFFFC3
++:1054E000AFB00010AFBF00181220001300A0802141
++:1054F0008CA2000024040002240601401040000F8A
++:10550000004028210E000C5C00000000000010216B
++:10551000AE000000022038218FBF00188FB10014A8
++:105520008FB0001000402021000028210000302111
++:105530000A00022A27BD00208CA200000220382188
++:105540008FBF00188FB100148FB0001000402021D1
++:1055500000002821000030210A00022A27BD002077
++:1055600000A010213087FFFF8CA500048C440000B0
++:105570000A00022A2406000627BDFFE0AFB0001093
++:10558000AFBF0018AFB100149363003E00808021CC
++:105590000080282130620040000020211040000FD0
++:1055A0008E1100000E000851022020219367000098
++:1055B0002404005030E500FF50A400128E0F0000BC
++:1055C000022020218FBF00188FB100148FB000106F
++:1055D000A762013C0A00091127BD00200E000287C6
++:1055E000000000000E0008510220202193670000F7
++:1055F0002404005030E500FF14A4FFF20220202113
++:105600008E0F00003C1008008E1000503C0D000C66
++:10561000240BFF8001F05021314E007F01DA602120
++:10562000018D4021014B4824AF4900280220202150
++:105630008FBF00188FB100148FB00010A50200D6E4
++:1056400027BD00200A000911AF8800D027BDFFE068
++:10565000AFBF0018AFB10014AFB0001093660001E7
++:10566000008080210E00025630D1000493640005B2
++:10567000001029C2A765000034830040A363000521
++:105680000E00025F020020210E00091302002021FB
++:1056900024020001AF62000C02002821A762001062
++:1056A00024040002A762001224060140A76200142D
++:1056B0000E000C5CA76200161620000F8FBF0018AA
++:1056C000978C00343C0B08008D6B00782588FFFF19
++:1056D0003109FFFF256A0001012A382B10E000067E
++:1056E000A78800343C0F6006240E001635ED00102C
++:1056F000ADAE00508FBF00188FB100148FB00010F6
++:1057000003E0000827BD002027BDFFE0AFB1001473
++:10571000AFBF0018AFB0001000A088211080000AB1
++:105720003C03600024020080108200120000000090
++:105730000000000D8FBF00188FB100148FB0001053
++:1057400003E0000827BD00208C682BF80500FFFE51
++:1057500000000000AC712BC08FBF00188FB1001487
++:105760008FB000103C09100027BD002003E00008A6
++:10577000AC692BF80E00025600A0202193650005AD
++:10578000022020210E00025F30B000FF2403003E03
++:105790001603FFE7000000008F4401780480FFFE3D
++:1057A000240700073C061000AF51014002202021D1
++:1057B000A34701448FBF00188FB100148FB00010B1
++:1057C000AF4601780A0002C227BD002027BDFFE8CE
++:1057D000AFBF0014AFB000108F50002000000000D9
++:1057E0000E000913AF440020AF5000208FBF0014FB
++:1057F0008FB0001003E0000827BD00183084FFFFC1
++:10580000008038212406003500A020210A00022A49
++:10581000000028213084FFFF008038212406003654
++:1058200000A020210A00022A0000282127BDFFD065
++:10583000AFB3001C3093FFFFAFB50024AFB2001828
++:10584000AFBF0028AFB40020AFB10014AFB000105C
++:1058500030B5FFFF12600027000090218F90001CE0
++:105860008E0300003C0680002402004000033E023C
++:1058700000032C0230E4007F006688241482001D9F
++:1058800030A500FF8F8300282C68000A510000100B
++:105890008F910014000358803C0C0800258C56940E
++:1058A000016C50218D49000001200008000000001B
++:1058B00002B210213045FFFF0E000236240400849E
++:1058C000162000028F90001CAF8000288F910014DA
++:1058D000260C002026430001018080213072FFFF4A
++:1058E00016200004AF8C001C0253502B1540FFDC27
++:1058F00000000000024010218FBF00288FB5002457
++:105900008FB400208FB3001C8FB200188FB1001429
++:105910008FB0001003E0000827BD0030240E0034D3
++:1059200014AE00F9000000009203000E241F168040
++:105930003C07000CA36300219202000D0347C8211D
++:105940003C066000A3620020961100123C0A7FFF13
++:10595000354CFFFFA771003C960B00102403000597
++:105960003168FFFFAF6800848E05001CAF5F002820
++:105970008F3800008CC4444803057826008F3021FE
++:10598000AF66004C8F69004C24CE00013C057F00BF
++:10599000AF6900508F740050AF740054AF66007050
++:1059A000AF6E00588F6D005824140050AF6D005C2E
++:1059B000A3600023AF6C0064A36300378E02001461
++:1059C000AF6200488F710048AF7100248E0B001841
++:1059D000AF6B006C9208000CA3680036937F003E0A
++:1059E00037F90020A379003E8F78007403058024E6
++:1059F000360F4000AF6F007493640000308900FFE1
++:105A0000513402452404FF803C04080024845A9841
++:105A10000E00028D000000003C1008008E105A9805
++:105A20000E00025602002021240600042407000173
++:105A3000A366007D020020210E00025FA36700051F
++:105A40008F5F017807E0FFFE240B0002AF5001409A
++:105A5000A34B01448F90001C3C081000AF48017814
++:105A60000A000362AF8000282CAD003751A0FF98D8
++:105A70008F9100140005A0803C180800271856BC20
++:105A8000029878218DEE000001C00008000000009F
++:105A90002418000614B80011000000003C0808009B
++:105AA0008D085A9824040005AF4800208E1F001866
++:105AB000AF7F00188F79004CAF79001C8F650050C4
++:105AC000122000C0AF6500700A000362AF84002896
++:105AD0002406000710A60083240300063C050800E6
++:105AE00024A55A980E000264240400818F90001CA3
++:105AF0000011102B0A000362AF8200282407000463
++:105B000014A7FFF6240500503C1808008F185A9877
++:105B1000AF5800208E0F0008AF6F00408E090008BC
++:105B2000AF6900448E14000CAF7400488E0E001054
++:105B3000AF6E004C8E0D0010AF6D00848E0A001405
++:105B4000AF6A00508E0C0018AF6C00548E04001C1D
++:105B5000AF64005893630000306B00FF116501D8FB
++:105B6000000000008F7400488F6900400289702394
++:105B700005C000042404008C1620FFDE240200036C
++:105B8000240400823C05080024A55A980E000287D0
++:105B9000000000008F90001C000010210A0003622A
++:105BA000AF820028240F000514AFFFCC240520008D
++:105BB0003C0708008CE75A98AF4700208E06000487
++:105BC000AF66005C9208000824100008A36800215A
++:105BD0008F9F001C93F90009A37900208F86001C79
++:105BE00090D8000A330400FF10900011000000005C
++:105BF0002885000914A0006924020002240A00205C
++:105C0000108A000B34058000288D002115A00008A3
++:105C100024054000240E0040108E00053C050001C4
++:105C200024140080109400023C050002240540006A
++:105C30008F7800743C19FF00031980240205782531
++:105C4000AF6F007490C4000BA36400818F84001CAC
++:105C50009489000C11200192000000009490000C27
++:105C60002406FFBF24050004A770003C908F000E9F
++:105C7000A36F003E8F84001C9089000FA369003F32
++:105C80008F8B001C8D6E00108F54007401D468231C
++:105C9000AF6D00608D6A0014AF6A0064956C0018E7
++:105CA000A76C00689563001AA763006A8D62001CE8
++:105CB000AF62006C9167000EA367003E9368003EE0
++:105CC0000106F8241220014BA37F003E8F90001C98
++:105CD0000A000362AF8500282407002214A7FF7F73
++:105CE000240300073C0B08008D6B5A981220000C0F
++:105CF000AF4B00200A000362AF830028240C00335E
++:105D000010AC0014240A00283C05080024A55A9869
++:105D10000E00023C240400810A0003EB8F90001C5B
++:105D20003C04080024845A980E00028D00000000F4
++:105D30009363000024110050306200FF10510135C0
++:105D4000000000008F90001C000018210A00036270
++:105D5000AF8300283C0D08008DAD5A9824040081C3
++:105D6000AF4D00203C05080024A55A980E00023CC7
++:105D7000A36A00348F90001C240200090A00036209
++:105D8000AF82002802B288213225FFFF0E000236C2
++:105D9000240400840A0003628F90001C1082FFA478
++:105DA00024050400288B000311600170240C0004FA
++:105DB000240300015483FF9E240540000A00043B95
++:105DC000240501003C04080024845A988F62004C8A
++:105DD0000E00028D8F6300508F90001C0000202168
++:105DE0000A000362AF8400288E1000042404008A95
++:105DF000AF50002093790005333800021700015F8F
++:105E0000020028219368002302002821311F00206E
++:105E100017E0015A2404008D9367003F2406001206
++:105E200030E200FF10460155240400810E000256A6
++:105E30000200202193630023240500040200202196
++:105E4000346B0042A36B00230E00025FA365007D4C
++:105E50008F4401780480FFFE240A0002AF50014005
++:105E6000A34A01448F90001C3C0C1000AF4C0178F9
++:105E70000A0003EC0011102B8E1000042404008A89
++:105E8000AF500020936E000531CD000215A0001622
++:105E900002002821936F003F2414000402002821EF
++:105EA00031E900FF11340010240400810E00025675
++:105EB000020020219362002324080012241FFFFE09
++:105EC00034460020A3660023A368003F93790005B1
++:105ED00002002021033FC0240E00025FA3780005CA
++:105EE00002002821000020210E00033400000000E1
++:105EF0000A0003EB8F90001C8E1000043C03000886
++:105F00000343A021AF500020928B000024050050D5
++:105F1000316400FF10850161240700880200202100
++:105F2000000028210E00022A2406000E928D000097
++:105F3000240EFF800200282101AE8025A2900000DF
++:105F4000240400040E000C5C240600300A0003EB5D
++:105F50008F90001C8E0800043C14080026945A9868
++:105F60003C010800AC285A98AF480020921F00035B
++:105F700033F9000413200002240200122402000658
++:105F8000A362003F920B001B2404FFC03165003F59
++:105F900000A43825A367003E9206000330C200012A
++:105FA00014400132000000008E020008AE8200089A
++:105FB0003C0208008C425AA010400131000249C244
++:105FC000A76900088E14000C240C0001240300149F
++:105FD000AF74002C8E0E0010AF6E0030960D0016C0
++:105FE000A76D0038960A0014A76A003AAF6C000C3F
++:105FF000A76C0010A76C0012A76C0014A76C001609
++:1060000012200136A3630034920F000331F0000226
++:106010002E1100018F90001C262200080A00036246
++:10602000AF8200288E0400043C0E0008034E30218D
++:10603000AF4400208E05000890CD0000240C0050D5
++:1060400031AA00FF114C00862407008824060009AD
++:106050000E00022A000000000A0003EB8F90001CD3
++:106060008E04001C0E00024100000000104000F4ED
++:10607000004050218F89001C240700890140202105
++:106080008D25001C240600010E00022A00000000DD
++:106090000A0003EB8F90001C960D00023C140800D0
++:1060A00026945A9831AA0004514000B83C10600070
++:1060B0008E0E001C3C010800AC2E5A98AF4E0020FA
++:1060C000920700102408001430E200FF144800D6A4
++:1060D00000000000960B00023163000114600165AE
++:1060E000000000008E020004AE8200083C1408008C
++:1060F0008E945AA01280015B000000008F7400741F
++:106100003C0380002404000102835825AF6B007417
++:10611000A3600005AF64000C3C0708008CE75AA0A0
++:106120008F86001CA7640010000711C2A76400122C
++:10613000A7640014A7640016A76200088CC80008B2
++:1061400024040002AF68002C8CC5000CAF65003041
++:1061500090DF0010A37F00348F99001C9330001152
++:10616000A37000358F98001C930F0012A36F0036A8
++:106170008F89001C912E0013A36E00378F90001C96
++:10618000960D0014A76D0038960A0016A76A003A0B
++:106190008E0C0018AF6C00245620FDCCAF84002874
++:1061A0003C05080024A55A980E0002640000202136
++:1061B0008F90001C0A0004A7000020218E1000040C
++:1061C00024070081AF500020936900233134001070
++:1061D000128000170000000002002021000028218A
++:1061E0002406001F0E00022A000000000A0003EB34
++:1061F0008F90001C3C05080024A55A980E000287C9
++:10620000240400828F90001C000028210A000362F1
++:10621000AF8500283C0408008C845A980E0014E8CE
++:10622000000000008F90001C0A000482000018216A
++:106230000E00025602002021937800230200202144
++:10624000370F00100E00025FA36F002300003821FB
++:1062500002002021000028210A0005A82406001FB2
++:10626000920F000C31E90001112000030000000032
++:106270009618000EA4D8002C921F000C33F90002CF
++:1062800013200005000038218E0200149608001229
++:10629000ACC2001CA4C8001A0A0005432406000969
++:1062A0003C05080024A55A980E0002872404008BA0
++:1062B0008F90001C0011282B0A000362AF85002874
++:1062C000AF6000843C0A08008D4A5A983C0D0800D3
++:1062D0008DAD0050240CFF803C02000C014D1821B4
++:1062E000006C2024AF4400288E070014306B007F20
++:1062F000017A282100A2C821AF2700D88E060014F9
++:10630000AF9900D0AF2600DC8E080010251FFFFEDD
++:106310000A000408AF3F01083C0508008CA55A9804
++:106320003C1908008F39005024CCFFFE00B9C02171
++:1063300003047824AF4F00283C1408008E945A9828
++:106340003C0908008D2900500289702131CD007F61
++:1063500001BA502101478021AE0600D8AF9000D08D
++:10636000AE0000DC0A0003B1AE0C0108548CFE3014
++:10637000240540000A00043B240510000E00032EF3
++:10638000000000000A0003EB8F90001C8E0F442CCD
++:106390003C186C62370979703C010800AC205A98AF
++:1063A00015E9000824050140979F00349786002CCA
++:1063B0000280282103E6C82B132000112404009238
++:1063C000240501400E000C7A240400023C01080060
++:1063D000AC225A98AF4200203C0508008CA55A9880
++:1063E00010A00005240400830E00084500000000F2
++:1063F00010400009240400833C05080024A55A9895
++:106400000E000264000000008F90001C0011202B81
++:106410000A000362AF8400280E0008490000000053
++:106420000A00055F8F90001C0E00084D0000000060
++:106430003C05080024A55A980A00062F2404008B66
++:10644000240400040E000C7A240500301440002AB5
++:10645000004050218F89001C240700830140202127
++:106460008D25001C0A000551240600018E04000839
++:106470000E000241000000000A00051BAE82000869
++:106480003C05080024A55A980E00023C240400870D
++:106490008F90001C0A0005360011102B8F830038E6
++:1064A0008F8600301066FE9D000038213C070800F2
++:1064B00024E75A1C000320C0008728218CAC000070
++:1064C00011900061246A00013143000F5466FFFA05
++:1064D000000320C00A0004F6000038213C05080033
++:1064E00024A55A980E000287240400828F90001C75
++:1064F0000A000536000010213C0B0008034B202148
++:106500002403005024070001AF420020A0830000B4
++:10651000A08700018F82001C90480004A08800180A
++:106520008F85001C90A60005A08600198F9F001C77
++:1065300093F90006A099001A8F90001C921800078A
++:10654000A098001B8F94001C928F0008A08F001C45
++:106550008F89001C912E0009A08E001D8F8D001CBC
++:1065600091AC000AA08C001E8F8B001C3C0C080014
++:10657000258C5A1C9163000B3C0B0800256B5A18A4
++:10658000A083001F8F87001C90E8000CA0880020CB
++:106590008F82001C9045000D24024646A0850021F4
++:1065A0008F86001C90DF000EA09F00228F99001C98
++:1065B0009330000FA09000238F98001C93140010BC
++:1065C000A09400248F8F001C91E90011A089002560
++:1065D0008F89001C8F8E00308F900038952D00140D
++:1065E000000E18C025C80001A48D002895270016AC
++:1065F000006C3021006BC821A487002A9525001863
++:106600003108000FA485002CA482002E8D3F001CB1
++:10661000ACCA0000AF88003011100006AF3F000088
++:10662000000038218D25001C014020210A00055161
++:1066300024060001250C00013184000F00003821E0
++:106640000A0006B8AF8400383C07080024E75A184F
++:106650000087302100003821ACA000000A0004F6B9
++:10666000ACC000003C05080024A55A980A00062F7B
++:10667000240400878E0400040E0002410000000084
++:106680000A00056AAE8200083084FFFF30C600FFB2
++:106690008F4201B80440FFFE00064400010430258B
++:1066A0003C07200000C720253C031000AF400180BC
++:1066B000AF450184AF44018803E00008AF4301B84F
++:1066C00027BDFFE8AFB00010AFBF00143C0760006B
++:1066D000240600021080000600A080210010102B6C
++:1066E0008FBF00148FB0001003E0000827BD001812
++:1066F0003C09600EAD2000348CE5201C8F82001C0C
++:106700002408FFFC00A81824ACE3201C0E0006D1CE
++:106710008C45000C0010102B8FBF00148FB00010A0
++:1067200003E0000827BD00183C02600E344701005A
++:1067300024090018274A040000000000000000009F
++:10674000000000003C06005034C30200AF44003893
++:10675000AF45003CAF430030014018218F4B000093
++:10676000316800201100FFFD2406007F2408FFFF90
++:106770008C6C000024C6FFFF24630004ACEC000016
++:1067800014C8FFFB24E70004000000000000000024
++:10679000000000003C0F0020AF4F00300000000060
++:1067A00024AD020001A5702B2529FFFF008E2021BA
++:1067B0001520FFE101A0282103E0000800000000EF
++:1067C00027BDFFE0AFB10014AFBF0018AFB000109D
++:1067D0003C05600E8CA20034008088211440000625
++:1067E0003C0460008C87201C2408FFFC00E8302457
++:1067F00034C30001AC83201C8F8B001C24090001D2
++:10680000ACA90034956900028D6500148D70000CF0
++:106810002D2400818D6700048D660008108000071C
++:106820008D6A00102D2C00041580000E30CE00075C
++:10683000312D000311A0000B000000002404008B88
++:10684000020028210E0006D1240600030011102B9F
++:106850008FBF00188FB100148FB0001003E0000844
++:1068600027BD002015C0FFF62404008B3C03002048
++:10687000AF4300300000000024020001AF8200148A
++:106880000000000000000000000000003C1F01505C
++:10689000013FC825253800033C0F600EAF47003884
++:1068A00000181882AF46003C35E8003CAF59003074
++:1068B000274704008F4400003086002010C0FFFDF1
++:1068C00000000000106000082466FFFF2403FFFFA3
++:1068D0008CEB000024C6FFFF24E70004AD0B000092
++:1068E00014C3FFFB250800043C08600EAD09003806
++:1068F0000000000000000000000000003C07002035
++:10690000AF470030000000000E0006F901402021D2
++:1069100002002821000020210E0006D124060003D9
++:106920000011102B8FBF00188FB100148FB0001012
++:1069300003E0000827BD002027BDFFE0AFB200182C
++:106940003092FFFFAFB10014AFBF001CAFB000101A
++:106950001640000D000088210A0007AA022010211D
++:1069600024050001508500278CE5000C0000000D77
++:10697000262300013071FFFF24E200200232382B71
++:1069800010E00019AF82001C8F8200141440001622
++:106990008F87001C3C0670003C0320008CE5000043
++:1069A00000A62024148300108F84003C00054402BC
++:1069B0003C09800000A980241480FFE9310600FF13
++:1069C0002CCA00095140FFEB262300010006688015
++:1069D0003C0E080025CE579801AE60218D8B00003B
++:1069E0000160000800000000022010218FBF001C81
++:1069F0008FB200188FB100148FB0001003E00008B0
++:106A000027BD00200E0006D1240400841600FFD804
++:106A10008F87001C0A00078BAF80003C90EF0002BC
++:106A200000002021240600090E0006D1000F2E00D0
++:106A30008F87001C0010102B0A00078BAF82003CD0
++:106A4000020028210E0006DF240400018F87001CAD
++:106A50000A00078BAF82003C020028210E0006DFEF
++:106A6000000020210A0007C38F87001C0E00071FAB
++:106A7000020020210A0007C38F87001C30B0FFFFEF
++:106A8000001019C08F5801B80700FFFE3C1F2004FA
++:106A90003C191000AF430180AF400184AF5F018813
++:106AA000AF5901B80A00078C262300013082FFFF8E
++:106AB00014400003000018210004240224030010E5
++:106AC000308500FF14A000053087000F2466000801
++:106AD0000004220230C300FF3087000F14E00005DD
++:106AE000308900032468000400042102310300FF00
++:106AF0003089000315200005388B0001246A00024C
++:106B000000042082314300FF388B00013164000112
++:106B100010800002246C0001318300FF03E00008B4
++:106B200000601021308BFFFF000B394230E600FF80
++:106B30003C09080025295998000640800109602178
++:106B40008D8700003164001F240A0001008A1804A8
++:106B500030A500FF00E3202514A000020003102749
++:106B600000E22024240F000100CF700401096821F5
++:106B7000000E282714800005ADA400008F86000CAD
++:106B800000A6102403E00008AF82000C8F88000CE0
++:106B900001C8102503E00008AF82000C3C06001F6E
++:106BA0003C0360003084FFFF34C5FF8024020020D6
++:106BB000AC602008AC60200CAC602010AC652014E8
++:106BC000AC642018AC62200000000000000000004F
++:106BD00003E000080000000027BDFFE82402FFFFDB
++:106BE000AFBF0010AF82000C000020213C0608005F
++:106BF00024C659982405FFFF248900010004408021
++:106C00003124FFFF010618212C87002014E0FFFA31
++:106C1000AC6500000E0008160000202124020001CF
++:106C20003C04600024050020AC822018AC852000C4
++:106C3000000000000000000000000000244A0001E5
++:106C40003142FFFF2C46040014C0FFF78FBF001035
++:106C500003E0000827BD00188F8300082C620400A1
++:106C600003E00008384200018F830008246200011D
++:106C700003E00008AF8200088F8300082462FFFF52
++:106C800003E00008AF82000827BDFFE0AFB10014A9
++:106C9000AFBF0018AFB000108F6B00303C06600033
++:106CA00000808821ACCB20088F6A002C3C02800039
++:106CB00024030008ACCA200C9769003A9768003892
++:106CC00000092C003107FFFF00A72025ACC42010CD
++:106CD000ACC22014ACC32000000000000000000083
++:106CE000000000003C0360008C6D200031AC000807
++:106CF0001580FFF9000000008C6E201405C00020F4
++:106D0000000000000E0007DA8F84000C00024080B3
++:106D10003C09080025295998010938218CE4000014
++:106D20000E0007DA00028140020220213090FFFFAE
++:106D3000020020210E0007F8000028213C0C8000F2
++:106D4000022C58253210FFFF3C116000240A00205D
++:106D5000AE2B2014AE302018AE2A20000000000018
++:106D60000000000000000000020010218FBF00188A
++:106D70008FB100148FB0001003E0000827BD002081
++:106D80008C6620143C02001F3443FF803C1FFFE848
++:106D900000C3C02437F9080003198021001079C20C
++:106DA0003C0C8000022C582531F0FFFF3C116000A4
++:106DB000240A0020AE2B2014AE302018AE2A20006A
++:106DC0000000000000000000000000000200102190
++:106DD0008FBF00188FB100148FB0001003E00008BF
++:106DE00027BD002027BDFFE8AFB000103402FFFF31
++:106DF0003090FFFFAFBF00141202000602002021F6
++:106E00000E00081600000000020020210E0007F806
++:106E1000240500018F8400088FBF00148FB000107C
++:106E20002483FFFF27BD001803E00008AF8300089C
++:106E3000000439C230E6003F00043B42000718401E
++:106E4000240210002CC4002024C8FFE0AF42002C14
++:106E5000246300011480000330A900FF00071840DC
++:106E6000310600FF0003608024080001019A5821C8
++:106E70003C0A000E00C82804016A382111200005D0
++:106E8000000530278CE900000125302503E00008CB
++:106E9000ACE600008CEE000001C6682403E00008A8
++:106EA000ACED000027BDFFE8AFBF0014AFB000108D
++:106EB0003C0460008C8508083403F00030A2F00028
++:106EC00050430006240200018C8708083404E000C7
++:106ED00030E6F00010C4001E24020002AF82004021
++:106EE0003C1060003C0A0200AE0A0814240910009D
++:106EF0003C08000E8E03440003482021AF49002CBB
++:106F0000240501200E000CC0000030218F830040BA
++:106F1000106000043C021691240B0001106B000E5F
++:106F20003C023D2C344F0090AE0F44088FBF00143C
++:106F30008FB000103C0C6000240E10003C0D0200CD
++:106F400027BD0018AD8E442003E00008AD8D081069
++:106F50000A0008E7AF8000403C0218DA344F009086
++:106F6000AE0F44088FBF00148FB000103C0C6000BF
++:106F7000240E10003C0D020027BD0018AD8E4420E9
++:106F800003E00008AD8D08100A0008BB24050001CD
++:106F90000A0008BB000028213C08080025085DA461
++:106FA0002404FFFF010018212402001E2442FFFFD9
++:106FB000AC6400000441FFFD246300043C070800AA
++:106FC00024E75E208CE5FFFC2404001C240600015D
++:106FD000308A001F0146480424840001000910275C
++:106FE0002C8300201460FFFA00A22824ACE5FFFCEB
++:106FF0003C05666634A4616E3C06080024C65EE06B
++:10700000AF840058AF88009C2404FFFF00C0182103
++:107010002402001F2442FFFFAC6400000441FFFD76
++:10702000246300043C0766663C05080024A55EA0B6
++:10703000AF86004834E6616EAF8600982404FFFFF7
++:1070400000A018212402000F2442FFFFAC640000BE
++:107050000441FFFD246300043C0B66663C06080007
++:1070600024C65E203568616EAF8500A4AF880070CD
++:107070002404FFFF00C018212402001F2442FFFF48
++:10708000AC6400000441FFFD246300043C0D66660F
++:107090003C0A0800254A5F6035AC616EAF860090FF
++:1070A000AF8C005C2404FFFF014018212402000380
++:1070B0002442FFFFAC6400000441FFFD2463000490
++:1070C0003C09080025295F708D27FFFC2404000679
++:1070D000240500013099001F0325C0042484000109
++:1070E000001878272C8E002015C0FFFA00EF3824F6
++:1070F000AD27FFFC3C09666624030400240403DC7E
++:1071000024050200240600663522616E3C08080052
++:1071100025085AA4AF820074AF830044AF83006C8B
++:10712000AF830050AF830084AF8A008CAF840064CB
++:10713000AF85004CAF860054AF840078AF85006007
++:10714000AF86008001001821240200022442FFFFC4
++:10715000AC6000000441FFFD24630004240400032C
++:107160002403000C3C0A0800254A5AB0AF8A006884
++:107170000A00098E2405FFFF000418802484000102
++:10718000006858212C8700C014E0FFFBAD650000AB
++:107190003C0E666635CD616E240C17A024081800DD
++:1071A000AF8D0088AF8C009403E00008AF88007CAE
++:1071B0002484007F000421C200004021000030210F
++:1071C00000003821000028210A0009A5AF8400A092
++:1071D0001060000624E7000100C4302124A500014E
++:1071E0002CC20BF51440FFFA2CA300663C090800E2
++:1071F00025295F6001201821240200032442FFFF9B
++:10720000AC6000000441FFFD2463000410E0001A9C
++:1072100024E3FFFF0003294210A0000A0000202100
++:107220002406FFFF3C03080024635F602484000100
++:107230000085502BAC660000250800011540FFFBBF
++:107240002463000430E2001F10400008000868803A
++:10725000240C0001004C38040008588001692821E2
++:1072600024E6FFFF03E00008ACA6000001A94021CE
++:107270002409FFFFAD09000003E000080000000042
++:10728000AF4400283C04000C034420210005288260
++:107290000A000CC000003021000421803C03600083
++:1072A000AC6410080000000000052980AC65100CDB
++:1072B0000000000003E000088C62100C27BDFFE80E
++:1072C0000080282124040038AFBF00140E0009D527
++:1072D000AFB0001024040E00AF4400283C10000C96
++:1072E00003502021240500100E000CC000003021A6
++:1072F00003501021AC400000AC40000424040038CE
++:107300008FBF00148FB0001024053FFF27BD001869
++:107310000A0009D58C430000000421803C03600072
++:10732000AC641008000000008C62100C03E0000840
++:107330000002118227BDFFC8AFB400208F940068FF
++:10734000AFBE0030AFB7002CAFB600280000B821A8
++:107350000080B021241E00C0AFBF0034AFB50024B0
++:10736000AFB3001CAFB20018AFB10014AFB0001043
++:107370000A000A12AFA5003C504000018F9400683B
++:1073800027DEFFFF13C00028269400048E92000021
++:107390003C03080024635DA01240FFF70283102B1A
++:1073A0003C04080024845AA4028410230002A8C0CC
++:1073B000000098210A000A212411000100118840D0
++:1073C000122000260000000002B380210251282470
++:1073D0000200202110A0FFF9267300010E0009DE33
++:1073E000000000000016684032EC000101AC2021D2
++:1073F0000E0009D5020028218F89009426F700018C
++:107400008FA6003C3AEB0001316A00012528FFFFFE
++:107410000011382702CAB021AF88009416E6FFE7B2
++:1074200002479024AE92000002E010218FBF00348A
++:107430008FBE00308FB7002C8FB600288FB5002488
++:107440008FB400208FB3001C8FB200188FB10014CE
++:107450008FB0001003E0000827BD00383C0E080084
++:1074600025CE5DA0028E102B0A000A0DAE92000000
++:1074700027BDFFD8AFB10014AFB00010AFBF0020E0
++:10748000AFB3001CAFB2001800A0882110A0001FED
++:10749000000480403C13080026735AA40A000A5ACC
++:1074A0002412000112200019261000010E0009F517
++:1074B00002002021000231422444FFA0000618806F
++:1074C0003045001F2C8217A1007318212631FFFFC1
++:1074D0001040FFF400B230048C690000020020214B
++:1074E00024053FFF012640241500FFEE0126382524
++:1074F0000E0009D5AC6700008F8A009426100001A9
++:10750000254700011620FFE9AF8700948FBF0020B8
++:107510008FB3001C8FB200188FB100148FB0001011
++:1075200003E0000827BD00288F85009C00805821BB
++:107530000000402100004821240A001F3C0C0800E4
++:10754000258C5E1C3C0D080025AD5DA48CA60000BA
++:1075500050C000140000402100AD1023000238C0CC
++:10756000240300010A000A930000202115000003F3
++:1075700000E410212448202400004821252900018E
++:10758000512B00132506DFDC106000062484000167
++:1075900000C3702415C0FFF5000318400A000A91CB
++:1075A0000000402110AC002624A300040060282124
++:1075B000254AFFFF1540FFE5AF85009C512B0004D5
++:1075C0002506DFDC0000402103E000080100102157
++:1075D0000006614230C5001F000C50803C070800C7
++:1075E00024E75DA424040001014730211120000F8D
++:1075F00000A420043C05080024A55E20148000059A
++:107600002529FFFF24C6000410C50011000000005A
++:10761000240400018CCF00000004C0270004204097
++:1076200001F868241520FFF5ACCD00008F99007893
++:1076300001001021032B482303E00008AF890078E4
++:107640003C05080024A55DA40A000A9B0000402117
++:107650003C06080024C65DA40A000AB42404000104
++:10766000308800FF240200021102000A24030003F4
++:107670001103005C8F8900A4240400041104005F3E
++:1076800024050005110500670000182103E000082B
++:10769000006010218F8900483C0C0800258C5EE0BA
++:1076A0003C04080024845F60240300201060000F65
++:1076B00000005821240D0002240E00033C0F080096
++:1076C00025EF5EE08D27000014E0000B30F9FFFF8E
++:1076D000252900040124C02B53000001018048210A
++:1076E0002463FFFF5460FFF88D270000016018211C
++:1076F00003E0000800601021132000323C0500FF69
++:1077000030E200FF004030211040004200005021D4
++:1077100024050001000020210005C84000A6C02467
++:1077200017000003332500FF14A0FFFB2484000191
++:10773000012CC023001828C000AA6021008C502111
++:107740003144001F240C0001008C18040003102792
++:1077500000E23024110D0041AD260000110E004C56
++:10776000000A1840110D00368F87006C510E00562C
++:107770008F8C0060240D0004110D005A8F8E008440
++:10778000240E0005150EFFDA01601821240B1430B9
++:1077900011400006000018218F8400A0246300011E
++:1077A000006A402B1500FFFD016458218F8A00807C
++:1077B000AF89008C016018212549FFFF0A000AEB00
++:1077C000AF89008000E52024000736021080FFD03A
++:1077D000240A001800075402314600FF0A000AF389
++:1077E000240A00103C0C0800258C5EA03C04080014
++:1077F00024845EE00A000ADA240300103C0C08002E
++:10780000258C5E203C04080024845EA00A000AD96E
++:107810008F89009000071A02306600FF0A000AF301
++:10782000240A00088F89008C3C0C0800258C5F60BE
++:107830003C04080024845F700A000ADA2403000470
++:10784000000A4080250B003024E6FFFF016018216C
++:10785000AF8900480A000AEBAF86006C000AC982B3
++:10786000001978803C07080024E75EA001E720218A
++:10787000000A18428C8F00003079001F032C380456
++:107880000007C02701F860240A000B08AC8C000038
++:10789000000331420006288000AF28213062001F1B
++:1078A0008CB8000024630001004CC804000321428E
++:1078B000001938270004108003073024004F2021CE
++:1078C0000A000B4CACA60000000A68C025AB0032D1
++:1078D000258AFFFF01601821AF8900A40A000AEB86
++:1078E000AF8A0060254B1030AF89009001601821ED
++:1078F00025C9FFFF0A000AEBAF8900843086000724
++:107900002CC2000610400014000000000006408059
++:107910003C030800246357BC010338218CE40000B9
++:1079200000800008000000002409000310A9000ED8
++:1079300000000000240A000510AA000B000000004F
++:10794000240B000110AB0008000000008F8C00A089
++:1079500010AC00050000000003E00008000010214A
++:107960000A000A7900A020210A000AC700C02021CD
++:1079700027BDFFE8308400FF240300021083000BC2
++:10798000AFBF0010240600031086003A240800044C
++:1079900010880068240E0005108E007F2CAF143074
++:1079A0008FBF001003E0000827BD00182CA2003094
++:1079B0001440FFFC8FBF001024A5FFD0000531C28A
++:1079C000000668803C07080024E75EE001A730213C
++:1079D0008CC900000005288230AC001F240B000178
++:1079E000018B50048F840048012A4025ACC8000058
++:1079F0008C83000050600001AF8600488F98006CB7
++:107A000030AE000124A6FFFF270F000115C00002C1
++:107A1000AF8F006C24A600010006414200082080C0
++:107A2000008718218C79000030C2001F2406000155
++:107A30000046F804033F382410E0FFDA8FBF00103F
++:107A40000005C182001870803C0F080025EF5EA081
++:107A500001CF48218D2B00000005684231A5001F91
++:107A600000A66004016C502527BD001803E0000843
++:107A7000AD2A00002CA7003014E0FFCA8FBF001011
++:107A800030B900071723FFC724A8FFCE00086A02F9
++:107A9000000D60803C0B0800256B5EA0018B30213F
++:107AA0008CC40000000828C230AA001F240800016E
++:107AB000014848048F8200A400891825ACC3000047
++:107AC0008C5F000053E00001AF8600A40005704009
++:107AD000000E7942000F28803C04080024845EE0F8
++:107AE00000A418218C6B000025DF000131CD001FA0
++:107AF000001F514201A86004016C4825000A108053
++:107B0000AC690000004428218CA600008F9800601A
++:107B100033F9001F8FBF00100328380400C77825F1
++:107B2000270E000127BD0018ACAF000003E00008DD
++:107B3000AF8E006024A5EFD02CB804001300FF998D
++:107B40008FBF001000053142000658803C0A080033
++:107B5000254A5E20016A30218CC4000030A3001F3A
++:107B600024090001006910048F9900900082F82513
++:107B7000ACDF00008F27000050E00001AF860090CE
++:107B80008F8D00848FBF001027BD001825AC000129
++:107B900003E00008AF8C008415E0FF828FBF001067
++:107BA0008F8600A0000610400046F821001F21002B
++:107BB00003E4C8210019384024F8143000B8402BE1
++:107BC0001100FF788FBF001024A4EBD00E00021329
++:107BD00000C0282100027942000F70803C0D08008F
++:107BE00025AD5F6001CD20218C8B0000304C001F43
++:107BF00024060001018618048F89008C016350253A
++:107C0000AC8A00008D25000050A00001AF84008CDC
++:107C10008F9800808FBF001027BD00182708000133
++:107C200003E00008AF88008030A5000724030003AC
++:107C300010A3001028A2000414400008240700022A
++:107C40002403000410A300152408000510A8000F49
++:107C50008F8500A003E000080000000014A7FFFDCE
++:107C60000080282114C3FFFB240400020A000B8BB0
++:107C700000000000240900050080282110C9FFFB36
++:107C80002404000303E000080000000014C5FFF115
++:107C9000008028210A000B8B24040005240A00011F
++:107CA0000080282110CAFFF12404000403E000082A
++:107CB0000000000027BDFFE0AFB00010000581C24A
++:107CC0002603FFD024C5003F2C6223D024C6007FAA
++:107CD000AFB20018AFB10014AFBF001C309100FF6D
++:107CE000000691C2000529820200202110400008F0
++:107CF0002403FFFF0E000A4B0000000002002021B9
++:107D0000022028210E000C390240302100001821E9
++:107D10008FBF001C8FB200188FB100148FB00010FD
++:107D20000060102103E0000827BD002027BDFFD818
++:107D300024A2007FAFB3001CAFB20018000299C2AA
++:107D4000309200FF24A3003F02402021026028213E
++:107D5000AFB10014AFB00010AFBF00200E000B6E2B
++:107D60000003898200408021004020210220282138
++:107D700014400009000018218FBF00208FB3001CA1
++:107D80008FB200188FB100148FB000100060102166
++:107D900003E0000827BD00280E0009FC00000000D9
++:107DA00000402821020020211051FFF3001019C0CB
++:107DB0000E000A4B00000000020020210240282192
++:107DC0000E000C39026030218FBF00208FB3001CE1
++:107DD0008FB200188FB100148FB00010000018216E
++:107DE0000060102103E0000827BD00283084FFFF59
++:107DF00030A5FFFF1080000700001821308200012D
++:107E00001040000200042042006518211480FFFB8E
++:107E10000005284003E000080060102110C00007A2
++:107E2000000000008CA2000024C6FFFF24A500046F
++:107E3000AC82000014C0FFFB2484000403E00008AF
++:107E40000000000010A0000824A3FFFFAC86000083
++:107E500000000000000000002402FFFF2463FFFF79
++:107E60001462FFFA2484000403E00008000000000C
++:107E700030A5FFFF8F4201B80440FFFE3C076015AC
++:107E800000A730253C031000AF440180AF400184BF
++:107E9000AF46018803E00008AF4301B88F8500D0EA
++:107EA0002C864000008018218CA700840087102BAE
++:107EB00014400010000000008CA800842D06400033
++:107EC00050C0000F240340008CAA0084008A482B75
++:107ED000512000018CA3008400035A42000B208033
++:107EE0003C05080024A558200085182103E000085F
++:107EF0008C62000014C0FFF4000000002403400066
++:107F000000035A42000B20803C05080024A558209D
++:107F10000085182103E000088C6200008F8300D0E8
++:107F2000906600D024C50001A06500D08F8500D0E8
++:107F3000906400D090A200D210440017000000000E
++:107F4000936C00788F8B00BC318A00FFA16A000C13
++:107F500025490001938700C4312200FF3048007F8B
++:107F60001107000B00026827A36200788F4E01788A
++:107F700005C0FFFE8F9900B0241800023C0F1000CE
++:107F8000AF590140A358014403E00008AF4F017806
++:107F90000A000D0931A20080A0A000D00A000CFF49
++:107FA000000000008F8700D027BDFFC8AFBF0030A2
++:107FB000AFB7002CAFB60028AFB50024AFB4002097
++:107FC000AFB3001CAFB20018AFB10014AFB00010D7
++:107FD00094E300E094E200E2104300D72405FFFFA1
++:107FE0003C047FFF3497FFFF2415FF800A000DF04B
++:107FF0003C16000E108A00D18FBF00308F9100B068
++:108000003C1808008F18005C001230C0001291402C
++:108010000311702101D57824AF4F002C94EC00E2BD
++:1080200031CD007F01BA5821318A7FFF0176482186
++:10803000000A804002091021945300003C08080007
++:108040008D0800580246C02132733FFF001319808B
++:10805000010320210224282130BF007F03FAC82118
++:1080600000B5A024AF54002C0336A0218E87001049
++:108070008E8F003003785821256D008800EF702323
++:10808000240C0002AE8E0010AF8D00ACA16C0088F5
++:10809000976A003C8E8400308F9100AC0E000CD6A5
++:1080A0003150FFFF00024B80020940253C02420094
++:1080B00001022025AE2400048E8300048F8D00ACC5
++:1080C0008E860000240E0008ADA3001CADA600188B
++:1080D000ADA0000CADA00010929F000A33F900FF84
++:1080E000A5B90014968500083C1F000CA5A5001634
++:1080F0009298000A331100FFA5B100209690000865
++:1081000024180005A5B00022ADA00024928F000B1A
++:108110002410C00031E700FFA5A70002A1AE0001B6
++:108120008E8C00308F8B00AC8F8400B0AD6C00085B
++:108130003C0A08008D4A005401444821013540247E
++:10814000AF4800283C0208008C4200540044302113
++:1081500030C3007F007AC821033F282102458821CF
++:10816000AF9100BCAF8500C0A23800008F8A00BC70
++:108170002403FFBF2418FFDF954F000201F03824CD
++:1081800000F37025A54E0002914D000231AC003F76
++:10819000358B0040A14B00028F8600BC8F8900D038
++:1081A000ACC000048D28007C3C098000ACC80008ED
++:1081B00090C4000D3082007FA0C2000D8F8500BCEE
++:1081C00090BF000D03E3C824A0B9000D8F9100BC3F
++:1081D0009233000D02789024A232000D8E9000346C
++:1081E0008F8B00BCAD7000108E87002C8E8F0030FE
++:1081F00000EF7023AD6E0014916D001831AC007F5C
++:10820000A16C00188F9F00BC8E8A00308FE8001888
++:10821000015720240109302400C41025AFE20018C2
++:108220009283000AA3E3001C969900088F8500BC86
++:108230008F9800D0A4B9001E8E9000308E8400303C
++:108240000E0002138F0500848F8500D0000291403C
++:108250000002990090AF00BC0253882100403021F9
++:1082600031E7000210E0000302118021000290803B
++:108270000212802190B900BC3327000410E00002F4
++:108280000006F880021F80218E9800308F8B00BC82
++:1082900024068000330F0003000F702331CD00034C
++:1082A000020D6021AD6C000494A400E294AA00E2E7
++:1082B00094B000E231497FFF2522000130537FFF57
++:1082C0000206182400734025A4A800E294A400E24A
++:1082D0003C1408008E94006030917FFF123400221D
++:1082E000000000000E000CF6000000008F8700D098
++:1082F0000000282194F300E094F000E21213000F34
++:108300008FBF003090E900D090E800D1313200FFFB
++:10831000310400FF0244302B14C0FF36264A00010E
++:1083200090EE00D2264B000131CD00FF008D602180
++:10833000158BFF338F9100B08FBF00308FB7002CAB
++:108340008FB600288FB500248FB400208FB3001C97
++:108350008FB200188FB100148FB0001000A0102150
++:1083600003E0000827BD003894A300E20066402423
++:10837000A4A800E290A400E290B900E2309100FFCE
++:108380000011A1C20014F827001F39C03332007F4A
++:10839000024730250A000DE8A0A600E23084FFFF66
++:1083A00030A5FFFFAF440018AF45001C03E00008F4
++:1083B0008F42001427BDFFB8AFB000208F9000D0CF
++:1083C0003084FFFFAFA40010AFBF0044AFBE004039
++:1083D000AFB7003CAFB60038AFB50034AFB4003033
++:1083E000AFB3002CAFB20028AFB10024A7A0001893
++:1083F000920600D1920500D030C400FF30A300FFE8
++:108400000064102B10400122AFA00014920900D08C
++:108410008FB50010312800FF0088382324F4FFFFB7
++:108420000014882B0015982B02339024524001260B
++:108430008FB40014961E0012961F00108FB7001004
++:1084400003DFC823001714000019C400000224032E
++:108450000018140302E2B02A52C00001004020219B
++:108460000284282B10A0000200801821028018210D
++:1084700000033C0000071C033064FFFF2C8600094A
++:1084800014C000020060B821241700088E0A0008FA
++:10849000001769808E09000C31ABFFFF3C0C001007
++:1084A000016C402527520400AF4A0038AF9200B853
++:1084B000AF49003CAF480030000000000000000061
++:1084C00000000000000000000000000000000000AC
++:1084D00000000000000000008F4F000031EE00207F
++:1084E00011C0FFFD0017982A027110240A000E83A4
++:1084F0000000B02155E001019258000131130080C5
++:10850000126001CF012020219655001232A5FFFFF5
++:108510000E000CCBA7B500188F9000D00291A023BD
++:1085200026CD00018F9100B8000DB4000016B403F1
++:108530002638004002D7582A0014882B2405000151
++:108540000300902101711024AF9800B8AFA500146A
++:10855000104001BC8F8900B03C0C08008D8C005489
++:10856000240BFF80921E00D001895021014B28244A
++:10857000921900D0AF4500288E4700103C08080033
++:108580008D0800583C1808008F18005430E33FFF56
++:108590000003218001043021012658212402FF809C
++:1085A0000162F824920C00D0AF5F002C92480000CA
++:1085B00033D100FF333500FF0309982100117140CA
++:1085C000001578C0326D007F01CF382101BA282113
++:1085D000318300FF3164007F3C0A000C00AA88212F
++:1085E0000367F02100033140009A10213108003F59
++:1085F0003C1F000E00D1C021005F982127D90088C0
++:108600002D150008AF9100C0AF9900ACAF9800BC29
++:10861000AF9300B412A0018A00008821240E00014B
++:10862000010E4004310D005D11A0FFB2310F0002B8
++:108630008E4A00283C0300803C04FFEFAE6A000035
++:108640008E450024A260000A3488FFFFAE65000456
++:108650009247002C3C1FFF9F37FEFFFFA267000CD4
++:108660008E62000C3C180040A267000B00433025CE
++:1086700000C8C824033E88240238A825AE75000C23
++:108680008E490004AE6000183C0F00FFAE69001474
++:108690008E4D002C35EEFFFF8F8B00B001AE6024B5
++:1086A000AE6C00108E470008A660000896450012C8
++:1086B000AE6700208E42000C30B03FFF00105180AA
++:1086C000AE6200248E5E0014014B182130A400011C
++:1086D000AE7E00288E590018000331C2000443808A
++:1086E000AE79002C8E51001C00C8F821A67F001C1A
++:1086F000AE710030965800028E550020A678001EFC
++:10870000AE75003492490033313000045600000544
++:10871000925000008F8C00D08D8B007CAE6B0030AF
++:10872000925000008F8F00BCA1F00000924E0033E9
++:1087300031CD000251A00007925E00018F8900BC7C
++:108740002418FF80913100000311A825A1350000F5
++:10875000925E00018F9900BC2409FFBF240BFFDF4C
++:10876000A33E00018F9500BC92B8000D3311007F2D
++:10877000A2B1000D8F8E00BC91D0000D02097824AB
++:10878000A1CF000D8F8800BC8E6D0014910A000DE2
++:108790002DAC0001000C2940014B382400E51825C0
++:1087A000A103000D964200128F8800BC8F8700D075
++:1087B000A50200028E45000490FF00BC30A4000317
++:1087C0000004302330DE000300BE102133F9000224
++:1087D00017200002244400342444003090E200BCFE
++:1087E00000A2302430DF000417E0000224830004DC
++:1087F000008018218F8F00AC24090002AD03000413
++:10880000A1E90000924E003F8F8D00ACA1AE0001A7
++:108810008F9500AC924C003F8E440004A6AC000241
++:10882000976B003C0E000CD63170FFFF00025380A6
++:10883000020A38253C05420000E51825AEA30004D5
++:108840008F8600AC8E480038ACC800188E440034C7
++:10885000ACC4001CACC0000CACC00010A4C0001420
++:10886000A4C00016A4C00020A4C00022ACC00024F4
++:108870008E6400145080000124040001ACC4000880
++:108880000E000CF6241100010A000E768F9000D025
++:10889000920F00D2920E00D08FB5001031EB00FF86
++:1088A00031CD00FF008D6023016C50212554FFFF66
++:1088B0000014882B0015982B023390241640FEDDFF
++:1088C000000000008FB400148FBF00448FBE004032
++:1088D0003A8200018FB7003C8FB600388FB5003464
++:1088E0008FB400308FB3002C8FB200288FB10024DA
++:1088F0008FB0002003E0000827BD0048331100209E
++:10890000122000EF24150001921E00BC241F00015C
++:108910000000A82133D900011320000DAFBF001CB7
++:108920008E4400148E0800840088102B144000022E
++:10893000008030218E0600848E03006400C3A82BC3
++:1089400016A0000200C020218E0400640080A8212F
++:108950008E4700148E05006400E5302B14C0000221
++:1089600000E020218E0400640095F02313C0000471
++:108970008FAC001C240A0002AFAA001C8FAC001CA4
++:10898000028C582B156000A8000018218E4F00386B
++:108990008E6D000C3C0E0080AE6F00008E4A0034DD
++:1089A0003C10FF9F01AE5825AE6A00049246003F7E
++:1089B000360CFFFF016C38243C0500203C03FFEF20
++:1089C000A266000B00E510253468FFFF8F8700B812
++:1089D0000048F8243C04000803E4C825AE79000CE4
++:1089E0008CF80014AE60001802BE7821AE78001436
++:1089F0008CF10018AE71001C8CE90008AE690024EF
++:108A00008CEE000CAE6F002CAE600028AE6E002025
++:108A1000A6600038A660003A8CED001401B58023F2
++:108A2000021E902312400011AE72001090EA003D29
++:108A30008E6500048E640000000A310000A6C82183
++:108A4000000010210326402B0082F82103E8C021FA
++:108A5000AE790004AE78000090F1003DA271000AEA
++:108A60008F8900B895320006A67200088F9800AC76
++:108A70002419000202A02021A31900009769003CDC
++:108A80008F9200AC0E000CD63131FFFF00027B80CC
++:108A90008F8500B8022F68253C0E420001AE80256C
++:108AA000AE5000048F8400AC8CAC0038AC8C001845
++:108AB0008CAB0034AC8B001CAC80000CAC80001084
++:108AC000A4800014A4800016A4800020A4800022AA
++:108AD000AC80002490A7003FA487000212A00135BB
++:108AE0002403000153C0000290A2003D90A2003E6A
++:108AF00024480001A08800018F9F00ACAFF500085A
++:108B00008F8300D024070034906600BC30C500027B
++:108B100050A00001240700308F9200B88F8A00BC5B
++:108B2000906D00BC924B00002412C00032A50003DF
++:108B3000A14B00008F8600B88F8800BC240200047F
++:108B400090C400010045182330790003A1040001FE
++:108B50008F8A00BC8F9F00B800F53821955800021D
++:108B600097E9001200F9382103128824312F3FFFC2
++:108B7000022F7025A54E00029150000231A800047A
++:108B8000320C003F358B0040A14B000212A00002C6
++:108B90008F8500BC00E838218F8E00D0ACA7000480
++:108BA000240BFFBF8DCD007C2EA400012403FFDF2A
++:108BB000ACAD000890B0000D00044140320C007FC5
++:108BC000A0AC000D8F8600BC90CA000D014B102494
++:108BD000A0C2000D8F8700BC90E5000D00A3F82413
++:108BE00003E8C825A0F9000D8F9100B88F8D00BC57
++:108BF0008E380020ADB800108E290024ADA90014D5
++:108C00008E2F0028ADAF00188E2E002C0E000CF613
++:108C1000ADAE001C8FB0001C240C0002120C00EE44
++:108C20008F9000D08FA3001C006088211460000288
++:108C30000060A8210000A02156A0FE390291A023C7
++:108C40000014882B8FA90010960700103C1E0020EE
++:108C50000136402302C750213112FFFFA60A00103F
++:108C6000AFB20010AF5E0030000000009617001099
++:108C7000961300121277008F000000008E05000C82
++:108C80008E0B00080016698000AD7021000DC7C36F
++:108C900001CDA82B0178782101F56021AE0E000CE2
++:108CA000AE0C00088FB300100013B82B02378024DD
++:108CB0001200FF048F9000D00A000E3C000000005C
++:108CC0008E4D0038A6600008240B0003AE6D000036
++:108CD0008E500034A260000A8F9800B8AE70000475
++:108CE0003C0500809311003FA26B000C8E6F000CBE
++:108CF0003C0EFF9FA271000B01E5102535CCFFFF54
++:108D00003C03FFEF8F9200B8004C30243464FFFF27
++:108D100000C4F824AE7F000C8E590014964800124F
++:108D20008F8A00B0AE7900108E490014AE60001832
++:108D3000AE600020AE690014AE6000248E470018BB
++:108D400031093FFF0009F180AE6700288E4D000811
++:108D500003CA802131180001AE6D00308E4F000C27
++:108D60008F8C00AC001089C200185B80022B282178
++:108D7000240E0002A665001CA6600036AE6F002C13
++:108D8000A18E00009763003C8F8A00AC3C04420037
++:108D90003062FFFF00443025AD4600048F9F00B8CD
++:108DA000240700012411C0008FF30038240600348A
++:108DB000AD5300188FF90034AD59001CAD40000CC4
++:108DC000AD400010A5400014A5400016A5400020AD
++:108DD000A5400022AD400024A5550002A147000196
++:108DE0008F9E00AC8F8800B88F9200BCAFD5000872
++:108DF000910D0000A24D00008F9000B88F8B00BC39
++:108E000092180001A17800018F8400BC94850002B3
++:108E100000B1782401E97025A48E0002908C000234
++:108E20003183003FA08300028F8300D08F8400BC79
++:108E3000906200BC305300025260000124060030F2
++:108E4000AC8600048C6F007C2403FFBF02A0882145
++:108E5000AC8F0008908E000D31CC007FA08C000DEF
++:108E60008F8600BC90C2000D00432024A0C4000DDA
++:108E70008F8900BC913F000D37F90020A139000D0A
++:108E80008F8800B88F9300BC8D070020AE6700105C
++:108E90008D0A0024AE6A00148D1E0028AE7E0018D4
++:108EA0008D12002C0E000CF6AE72001C0A00103D54
++:108EB0008F9000D0960E00148E03000431CCFFFF7B
++:108EC000000C10C000622021AF44003C8E1F000443
++:108ED0008F46003C03E6C8231B20003C0000000036
++:108EE0008E0F000025E200013C05001034B500089B
++:108EF000AF420038AF550030000000000000000015
++:108F00000000000000000000000000000000000061
++:108F100000000000000000008F580000330B00200C
++:108F20001160FFFD000000008F5304003C0D002085
++:108F3000AE1300088F570404AE17000CAF4D00307D
++:108F4000000000003C0608008CC600442416000106
++:108F500010D600BD00000000961F00123C0508005E
++:108F60008CA5004000BFC821A61900129609001464
++:108F700025270001A6070014960A00143144FFFFBC
++:108F80005486FF498FB30010A60000140E000E1681
++:108F900030A5FFFF3C0408008C84002496030012D7
++:108FA0000044102300623023A60600120A00105964
++:108FB0008FB30010A08300018F8200AC2404000155
++:108FC000AC4400080A000FF08F8300D08E0200002E
++:108FD0000A0010EA3C0500108F8200C08FA7001C19
++:108FE000921800D0920B00D0920E00D0331100FFE7
++:108FF000316900FF00117940000928C001E56021B6
++:1090000031C300FF036C50210003314000C2C8216E
++:10901000255F0088AF9F00ACAF9900BCA1470088D6
++:109020009768003C03C020218F9100AC0E000CD645
++:109030003110FFFF00026B80020DC0253C0442008E
++:109040008F8D00B803045825AE2B00048DA900387D
++:109050008F8B00AC0000882100118100AD690018E1
++:109060008DAF00343C087FFF3504FFFFAD6F001C5F
++:1090700091AC003E8D65001C8D660018000C190037
++:10908000000C770200A33821020E102500E3F82B14
++:1090900000C2C821033F5021AD67001CAD6A001813
++:1090A000AD60000CAD60001091B8003E24050005D5
++:1090B00003C45024A578001495A9000403C02021FE
++:1090C000A569001691AF003EA56F002095B1000480
++:1090D000A5710022AD60002491AE003FA56E000294
++:1090E00091B0003E91AC003D01901023244300015B
++:1090F000A16300018F8600AC8F9F00BCACDE00082E
++:10910000A3E500008F9000BC8F9900B82405FFBF35
++:1091100096070002973800120247782433093FFF70
++:1091200001E98825A6110002921200022418FFDF2F
++:10913000324E003F35CD0040A20D00028F8600BCAC
++:109140008F8C00D02412FFFFACC000048D8B007CFC
++:109150003C0C8000ACCB000890C2000D3043007F77
++:10916000A0C3000D8F8700BC90FF000D03E5C8244D
++:10917000A0F9000D8F9100BC9229000D01387824D0
++:10918000A22F000D8F9000BCAE120010AE1500147F
++:10919000920E00182415FF8002AE6825A20D00185B
++:1091A0008F8500BC8F8300B88CAB0018016C102435
++:1091B000004A3025ACA600189068003EA0A8001C0C
++:1091C0008F9F00B88F8700BC8F9800D097F900045C
++:1091D000A4F9001E0E0002138F0500848F8600D0B4
++:1091E000000279400002490090D200BC01E98821C8
++:1091F000004028213255000212A0000303D1202193
++:109200000002A8800095202190CD00BC31B200045E
++:109210001240000333DF0003000540800088202156
++:10922000240600048F9E00BC00DFC8233327000300
++:1092300000875021AFCA00040E000CF6A665003866
++:109240000A0010388F9000D0961E00123C080800CB
++:109250008D080024011E9021A61200120A00105948
++:109260008FB3001027BDFFE03C1808008F18005096
++:10927000AFB00010AFBF0018AFB10014AF8400B0A2
++:1092800093710074030478212410FF8031EE007F75
++:109290003225007F01F0582401DA68213C0C000AD5
++:1092A000A38500C401AC2821AF4B002494A9001071
++:1092B0009768000690A600620080382124020030E2
++:1092C0000109202330C300F0AF8500D010620019DF
++:1092D0003090FFFF90AE0062240DFFF0240A005092
++:1092E00001AE6024318B00FF116A002F00000000E6
++:1092F00016000007241F0C00AF5F00248FB100147C
++:109300008FBF00188FB0001003E0000827BD0020B9
++:109310000E000E1C02002021241F0C00AF5F002451
++:109320008FB100148FBF00188FB0001003E0000849
++:1093300027BD002094A200E094A400E290BF011396
++:10934000008218263079FFFF33E700C014E00009DF
++:109350002F31000116000038000000005620FFE603
++:10936000241F0C000E000D18000000000A0011ED73
++:10937000241F0C001620FFDE000000000E000D1858
++:10938000000000001440FFDC241F0C001600002227
++:109390008F8300D0906901133122003FA062011336
++:1093A0000A0011ED241F0C0094AF00D48F8600D466
++:1093B00000E02821240400050E000C5C31F0FFFFC2
++:1093C0001440000524030003979100E600001821D3
++:1093D0002625FFFFA78500E68F5801B80700FFFE8E
++:1093E0003C196013AF400180241F0C00AF50018472
++:1093F000007938253C101000AF4701888FB1001468
++:10940000AF5001B8AF5F00248FB000108FBF0018BD
++:1094100003E0000827BD00200E000E1C02002021E2
++:109420005040FFB5241F0C008F8300D090690113BA
++:109430000A0012163122003F0E000E1C02002021ED
++:109440001440FFAD241F0C00122000078F8300D0B2
++:10945000906801133106003F34C20040A06201133E
++:109460000A0011ED241F0C000E000D180000000072
++:109470005040FFA1241F0C008F8300D0906801137F
++:109480003106003F0A00124634C20040AF9B00C8BC
++:1094900003E00008AF8000EC3089FFFF0009404284
++:1094A0002D020041000921801440000200095040B3
++:1094B00024080040000830C0000811400046582130
++:1094C000256701A800E2C821272F007F2418FF800C
++:1094D00001F818240064302100CA702125CC00FF57
++:1094E000240DFF00018D202425650088240A0088B2
++:1094F0003C010800AC2A004C3C010800AC2500509F
++:10950000AF8400D43C010800AC2900603C01080095
++:10951000AC2800643C010800AC2700543C01080062
++:10952000AC2300583C010800AC26005C03E00008B6
++:1095300000000000308300FF30C6FFFF30E400FF72
++:109540008F4201B80440FFFE00034C00012438257F
++:109550003C08600000E820253C031000AF45018076
++:10956000AF460184AF44018803E00008AF4301B86F
++:109570008F86001C3C096012352700108CCB00043C
++:109580003C0C600E35850010316A00062D48000144
++:10959000ACE800C48CC40004ACA431808CC20008C8
++:1095A00094C30002ACA2318403E00008A78300E466
++:1095B0003C0308008C6300508F8400E88F86001CF9
++:1095C0002402FF800064C0210302C824AF59002890
++:1095D0008CCD00043305007F00BA78213C0E000CCE
++:1095E00001EE2821ACAD00588CC80008AF8500D032
++:1095F0003C076012ACA8005C8CCC001034E8001072
++:10960000ACAC000C8CCB000CACAB000894AA0014E2
++:109610003C0208008C42004425490001A4A9001422
++:1096200094A400143083FFFF106200178F8400D0D1
++:109630003C0A08008D4A0040A4AA00128CCE0018F3
++:10964000AC8E00248CCD0014AC8D00208CC700188B
++:10965000AC87002C8CCC001424060001AC8C0028B4
++:109660008D0B00BC5166001A8D0200B48D0200B84B
++:10967000A482003A948F003AA48F003C948800D4CE
++:1096800003E000083102FFFF3C0908008D29002497
++:10969000A4A000148F8400D0A4A900128CCE0018BE
++:1096A000AC8E00248CCD0014AC8D00208CC700182B
++:1096B000AC87002C8CCC001424060001AC8C002854
++:1096C0008D0B00BC5566FFEA8D0200B88D0200B418
++:1096D000A482003A948F003AA48F003C948800D46E
++:1096E00003E000083102FFFF8F86001C3C0C0800DD
++:1096F0008D8C0050240BFF808CCD00083C03000CA7
++:10970000000D51C0018A4021010B4824AF8A00E8B6
++:10971000AF49002890C700073105007F00BA10212B
++:109720000043282130E4000410800039AF8500D0C8
++:1097300090CF000731EE000811C000380000000093
++:109740008CD9000C8CC400140324C02B13000030EF
++:10975000000000008CC2000CACA200648CCD00188C
++:109760002402FFF8ACAD00688CCC0010ACAC0080DB
++:109770008CCB000CACAB00848CCA001CACAA007C67
++:1097800090A900BC01224024A0A800BC90C30007FF
++:109790003067000810E000048F8500D090AF00BC57
++:1097A00035EE0001A0AE00BC90D9000733380001AF
++:1097B000130000088F8300D08F8700D0240400346A
++:1097C00090E800BC35030002A0E300BC8F8300D00A
++:1097D000AC6400C090C900073126000210C000052B
++:1097E00000000000906A00BC35420004A06200BC8A
++:1097F0008F8300D09065011330AD003FA06D011341
++:109800008F8C00D0958B00D403E000083162FFFFFD
++:109810008CC200140A001305000000000A001306A1
++:10982000ACA0006427BDFFD8AFB000108F90001C23
++:10983000AFBF0024AFB40020AFB20018AFB1001426
++:10984000AFB3001C9613000E3C07600A3C14600680
++:109850003264FFFF369300100E00125534F40410EA
++:109860008F8400D43C11600E0E00099B363100102D
++:10987000920E00153C0708008CE700603C12601255
++:1098800031CD000FA38D00F08E0E00048E0D000868
++:1098900096080012961F00109619001A9618001EBE
++:1098A000960F001C310CFFFF33EBFFFF332AFFFF45
++:1098B0003309FFFF31E6FFFF3C010800AC2B0040FD
++:1098C0003C010800AC2C00243C010800AC2A0044F8
++:1098D000AE293178AE26317C92020015960300162F
++:1098E00036520010304400FF3065FFFF3C06080090
++:1098F0008CC60064AE243188AE4500B492080014D2
++:1099000096190018241F0001011FC004332FFFFF08
++:109910003C0508008CA50058AE5800B8AE4F00BCFE
++:10992000920C0014AF8E00D8AF8D00DC318B00FF9D
++:10993000AE4B00C0920A0015AE670048AE66004C00
++:10994000314900FFAE4900C8AE65007C3C03080009
++:109950008C6300503C0408008C84004C3C080800D8
++:109960008D0800543C0208008C42005C8FBF00242C
++:10997000AE6300808FB00010AE8300748FB3001C04
++:10998000AE22319CAE4200DCAE2731A0AE2631A41F
++:10999000AE24318CAE233190AE283194AE2531986F
++:1099A000AE870050AE860054AE8500708FB10014B3
++:1099B000AE4700E0AE4600E4AE4400CCAE4300D07B
++:1099C000AE4800D4AE4500D88FB400208FB2001846
++:1099D00003E0000827BD002827BDFFE0AFB1001459
++:1099E000AFBF0018241100010E000845AFB00010F1
++:1099F00010510005978400E6978300CC0083102B5C
++:109A0000144000088F8500D4240700028FBF00187F
++:109A10008FB100148FB0001000E0102103E00008A7
++:109A200027BD00200E000C7A24040005AF8200E858
++:109A30001040FFF6240700020E0008498F90001C1A
++:109A4000979F00E68F9900E88F8D00C827EF0001EF
++:109A5000240E0050AF590020A78F00E6A1AE0000F1
++:109A60003C0C08008D8C00648F8600C8240A80009E
++:109A7000000C5E00ACCB0074A4C0000694C9000AC0
++:109A8000241FFF803C0D000C012AC024A4D8000A2A
++:109A900090C8000A24182000011F1825A0C3000A3E
++:109AA0008F8700C8A0E000788F8500C800003821AB
++:109AB000A0A000833C0208008C4200508F8400E884
++:109AC0000044782101FFC824AF590028960B0002FA
++:109AD00031EE007F01DA6021018D3021A4CB00D46A
++:109AE000960A0002AF8600D03C0E000425492401EE
++:109AF000A4C900E68E080004ACC800048E03000868
++:109B0000ACC30000A4C00010A4C00014A0C000D0CA
++:109B10008F8500D02403FFBFA0A000D13C04080023
++:109B20008C8400648F8200D0A04400D28E1F000C71
++:109B30008F8A00D0978F00E4AD5F001C8E19001053
++:109B400024100030AD590018A5400030A551005434
++:109B5000A5510056A54F0016AD4E0068AD580080C7
++:109B6000AD580084914D006231AC000F358B001070
++:109B7000A14B00628F8600D090C900633128007F1E
++:109B8000A0C800638F8400D02406FFFF9085006387
++:109B900000A31024A08200638F9100D000E0102168
++:109BA000923F00BC37F90001A23900BC8F8A00D077
++:109BB000938F00F0AD580064AD5000C0914E00D3BB
++:109BC000000F690031CC000F018D5825A14B00D347
++:109BD0008F8500D08F8900DCACA900E88F8800D881
++:109BE0008FBF00188FB100148FB0001027BD002068
++:109BF000ACA800ECA4A600D6A4A000E0A4A000E2BB
++:109C000003E000080000000027BDFFE0AFB0001037
++:109C10008F90001CAFB10014AFBF00188E19000464
++:109C20003C1808008F180050240FFF80001989C0CD
++:109C30000238702131CD007F01CF602401BA50215C
++:109C40003C0B000CAF4C0028014B4021950900D47F
++:109C5000950400D68E0700043131FFFFAF8800D095
++:109C60000E000913000721C08E0600048F8300C870
++:109C7000000629C0AF4500209064003E30820040BD
++:109C8000144000068F8400D0341FFFFF948300D659
++:109C90003062FFFF145F000400000000948400D6CF
++:109CA0000E0008A83084FFFF8E050004022030213A
++:109CB0008FBF00188FB100148FB000102404002251
++:109CC00000003821000529C00A00127C27BD0020B1
++:109CD00027BDFFE0AFB100143091FFFFAFB000101F
++:109CE000AFBF00181220001D000080218F86001CCD
++:109CF0008CC500002403000600053F020005140285
++:109D000030E4000714830015304500FF2CA800063E
++:109D10001100004D000558803C0C0800258C57D4DC
++:109D2000016C50218D490000012000080000000056
++:109D30008F8E00EC240D000111CD005900000000B1
++:109D4000260B00013170FFFF24CA00200211202BD6
++:109D5000014030211480FFE6AF8A001C0200102170
++:109D60008FBF00188FB100148FB0001003E00008FF
++:109D700027BD0020938700CE14E00038240400148F
++:109D80000E001338000000008F86001C2402000122
++:109D90000A00147FAF8200EC8F8900EC24080002D7
++:109DA0001128003B2404001300002821000030216A
++:109DB000240700010E00127C000000000A00147F3E
++:109DC0008F86001C8F8700EC2405000214E5FFF647
++:109DD000240400120E0012E9000000008F8500E844
++:109DE00000403021240400120E00127C00003821B3
++:109DF0000A00147F8F86001C8F8300EC241F000351
++:109E0000147FFFD0260B00010E00129B0000000003
++:109E10008F8500E800403021240200022404001055
++:109E200000003821AF8200EC0E00127C0000000020
++:109E30000A00147F8F86001C8F8F00EC240600021E
++:109E400011E6000B0000000024040010000028218F
++:109E5000000030210A00149C240700010000282182
++:109E60000E00127C000030210A00147F8F86001C37
++:109E70000E0013A500000000144000128F99001C72
++:109E80008F86001C240200030A00147FAF8200ECBE
++:109E90000E001431000000000A00147F8F86001CA1
++:109EA0000E00128B000000002402000224040014A3
++:109EB0000000282100003021000038210A0014B9D8
++:109EC000AF8200EC004038212404001097380002D3
++:109ED000000028210E00127C3306FFFF0A00147FC9
++:109EE0008F86001C8F8400C83C077FFF34E6FFFF8D
++:109EF0008C8500742402000100A61824AC83007431
++:109F000003E00008A082000510A000362CA200800B
++:109F1000274A04003C0B000524090080104000077C
++:109F20002408008030A6000F00C540212D030081C9
++:109F30001460000200A0482124080080AF4B0030CC
++:109F400000000000000000000000000011000009F7
++:109F500000003821014030218C8D000024E70004EE
++:109F600000E8602BACCD0000248400041580FFFACB
++:109F700024C60004000000000000000000000000F3
++:109F80003C0E0006010E3825AF47003000000000EF
++:109F900000000000000000008F4F000031E80010BA
++:109FA0001100FFFD000000008F42003C8F43003C89
++:109FB0000049C8210323C02B130000040000000047
++:109FC0008F4C003825860001AF4600388F47003C93
++:109FD00000A9282300E96821AF4D003C14A0FFCE62
++:109FE0002CA2008003E000080000000027BDFFD085
++:109FF0003C020002AFB100143C11000CAF45003828
++:10A00000AFB3001CAF46003C00809821AF42003047
++:10A0100024050088AF44002803512021AFBF002849
++:10A02000AFB50024AFB40020AFB200180E0014F199
++:10A03000AFB000103C1F08008FFF004C3C18080018
++:10A040008F1800642410FF8003F3A82132B9007F29
++:10A0500002B078240018A0C0033A70210018914083
++:10A0600001D12021AF4F00280E0014F10254282105
++:10A070003C0D08008DAD00502405012001B358218E
++:10A08000316C007F01705024019A48210131202158
++:10A090000E0014F1AF4A00283C0808008D08005457
++:10A0A0003C0508008CA500640113382130E6007FD0
++:10A0B00000F0182400DA202100912021AF4300286D
++:10A0C0000E0014F1000529403C0208008C420058A3
++:10A0D0003C1008008E1000601200001C0053882104
++:10A0E0002415FF800A0015743C14000C3226007FF2
++:10A0F0000235182400DA202102402821AF4300282D
++:10A10000009420210E0014F12610FFC01200000F51
++:10A11000023288212E05004110A0FFF42412100005
++:10A120003226007F001091800235182400DA2021A9
++:10A1300002402821AF430028009420210E0014F192
++:10A14000000080211600FFF3023288213C0B08003A
++:10A150008D6B005C240AFF802405000201734021FE
++:10A16000010A4824AF4900283C0408009484006296
++:10A170003110007F021A88213C07000C0E000CAA47
++:10A180000227982100402821026020218FBF00284B
++:10A190008FB500248FB400208FB3001C8FB200183D
++:10A1A0008FB100148FB000100A0014F127BD0030E9
++:10A1B0008F83001C8C62000410400003000000002C
++:10A1C00003E00008000000008C6400108C650008AB
++:10A1D0000A00152A8C66000C000000000000001B1D
++:10A1E0000000000F0000000A000000080000000648
++:10A1F000000000050000000500000004000000044D
++:10A200000000000300000003000000030000000342
++:10A210000000000300000002000000020000000235
++:10A220000000000200000002000000020000000226
++:10A230000000000200000002000000020000000216
++:10A240000000000200000002000000020000000206
++:10A2500000000001000000010000000108000F24C0
++:10A2600008000D6C08000FB80800106008000F4CC3
++:10A2700008000F8C0800119408000D88080011B820
++:10A2800008000DD8080015540800151C08000D889A
++:10A2900008000D8808000D880800124008001240D0
++:10A2A00008000D8808000D88080014E008000D88DB
++:10A2B00008000D8808000D8808000D88080013B4F8
++:10A2C00008000D8808000D8808000D8808000D881A
++:10A2D00008000D8808000D8808000D8808000D880A
++:10A2E00008000D8808000D8808000D8808000D88FA
++:10A2F00008000D8808000D8808000FAC08000D88C4
++:10A3000008000D880800167808000D8808000D88E0
++:10A3100008000D8808000D8808000D8808000D88C9
++:10A3200008000D8808000D8808000D8808000D88B9
++:10A3300008000D8808000D8808000D8808000D88A9
++:10A3400008000D8808000D8808000D88080014100A
++:10A3500008000D8808000D8808001334080012A4B6
++:10A3600008001E2C08001EFC08001F1408001F28EF
++:10A3700008001F3808001E2C08001E2C08001E2C88
++:10A3800008001ED808002E1408002E1C08002DE41A
++:10A3900008002DF008002DFC08002E08080052F4DB
++:10A3A000080052B40800528008005254080052308D
++:10A3B000080051EC0A000C840000000000000000BE
++:10A3C0000000000D727870362E322E33000000002F
++:10A3D000060203030000000000000001000000006E
++:10A3E000000000000000000000000000000000006D
++:10A3F000000000000000000000000000000000005D
++:10A40000000000000000000000000000000000004C
++:10A41000000000000000000000000000000000003C
++:10A42000000000000000000000000000000000002C
++:10A43000000000000000000000000000000000001C
++:10A44000000000000000000000000000000000000C
++:10A4500000000000000000000000000000000000FC
++:10A4600000000000000000000000000000000000EC
++:10A4700000000000000000000000000000000000DC
++:10A4800000000000000000000000000000000000CC
++:10A4900000000000000000000000000000000000BC
++:10A4A00000000000000000000000000000000000AC
++:10A4B000000000000000000000000000000000009C
++:10A4C000000000000000000000000000000000008C
++:10A4D000000000000000000000000000000000007C
++:10A4E000000000000000000000000000000000006C
++:10A4F000000000000000000000000000000000005C
++:10A50000000000000000000000000000000000004B
++:10A51000000000000000000000000000000000003B
++:10A52000000000000000000000000000000000002B
++:10A53000000000000000000000000000000000001B
++:10A54000000000000000000000000000000000000B
++:10A5500000000000000000000000000000000000FB
++:10A5600000000000000000000000000000000000EB
++:10A5700000000000000000000000000000000000DB
++:10A5800000000000000000000000000000000000CB
++:10A5900000000000000000000000000000000000BB
++:10A5A00000000000000000000000000000000000AB
++:10A5B000000000000000000000000000000000009B
++:10A5C000000000000000000000000000000000008B
++:10A5D000000000000000000000000000000000007B
++:10A5E000000000000000000000000000000000006B
++:10A5F000000000000000000000000000000000005B
++:10A60000000000000000000000000000000000004A
++:10A61000000000000000000000000000000000003A
++:10A62000000000000000000000000000000000002A
++:10A63000000000000000000000000000000000001A
++:10A64000000000000000000000000000000000000A
++:10A6500000000000000000000000000000000000FA
++:10A6600000000000000000000000000000000000EA
++:10A6700000000000000000000000000000000000DA
++:10A6800000000000000000000000000000000000CA
++:10A6900000000000000000000000000000000000BA
++:10A6A00000000000000000000000000000000000AA
++:10A6B000000000000000000000000000000000009A
++:10A6C000000000000000000000000000000000008A
++:10A6D000000000000000000000000000000000007A
++:10A6E000000000000000000000000000000000006A
++:10A6F000000000000000000000000000000000005A
++:10A700000000000000000000000000000000000049
++:10A710000000000000000000000000000000000039
++:10A720000000000000000000000000000000000029
++:10A730000000000000000000000000000000000019
++:10A740000000000000000000000000000000000009
++:10A7500000000000000000000000000000000000F9
++:10A7600000000000000000000000000000000000E9
++:10A7700000000000000000000000000000000000D9
++:10A7800000000000000000000000000000000000C9
++:10A7900000000000000000000000000000000000B9
++:10A7A00000000000000000000000000000000000A9
++:10A7B0000000000000000000000000000000000099
++:10A7C0000000000000000000000000000000000089
++:10A7D0000000000000000000000000000000000079
++:10A7E0000000000000000000000000000000000069
++:10A7F0000000000000000000000000000000000059
++:10A800000000000000000000000000000000000048
++:10A810000000000000000000000000000000000038
++:10A820000000000000000000000000000000000028
++:10A830000000000000000000000000000000000018
++:10A840000000000000000000000000000000000008
++:10A8500000000000000000000000000000000000F8
++:10A8600000000000000000000000000000000000E8
++:10A8700000000000000000000000000000000000D8
++:10A8800000000000000000000000000000000000C8
++:10A8900000000000000000000000000000000000B8
++:10A8A00000000000000000000000000000000000A8
++:10A8B0000000000000000000000000000000000098
++:10A8C0000000000000000000000000000000000088
++:10A8D0000000000000000000000000000000000078
++:10A8E0000000000000000000000000000000000068
++:10A8F0000000000000000000000000000000000058
++:10A900000000000000000000000000000000000047
++:10A910000000000000000000000000000000000037
++:10A920000000000000000000000000000000000027
++:10A930000000000000000000000000000000000017
++:10A940000000000000000000000000000000000007
++:10A9500000000000000000000000000000000000F7
++:10A9600000000000000000000000000000000000E7
++:10A9700000000000000000000000000000000000D7
++:10A9800000000000000000000000000000000000C7
++:10A9900000000000000000000000000000000000B7
++:10A9A00000000000000000000000000000000000A7
++:10A9B0000000000000000000000000000000000097
++:10A9C0000000000000000000000000000000000087
++:10A9D0000000000000000000000000000000000077
++:10A9E0000000000000000000000000000000000067
++:10A9F0000000000000000000000000000000000057
++:10AA00000000000000000000000000000000000046
++:10AA10000000000000000000000000000000000036
++:10AA20000000000000000000000000000000000026
++:10AA30000000000000000000000000000000000016
++:10AA40000000000000000000000000000000000006
++:10AA500000000000000000000000000000000000F6
++:10AA600000000000000000000000000000000000E6
++:10AA700000000000000000000000000000000000D6
++:10AA800000000000000000000000000000000000C6
++:10AA900000000000000000000000000000000000B6
++:10AAA00000000000000000000000000000000000A6
++:10AAB0000000000000000000000000000000000096
++:10AAC0000000000000000000000000000000000086
++:10AAD0000000000000000000000000000000000076
++:10AAE0000000000000000000000000000000000066
++:10AAF0000000000000000000000000000000000056
++:10AB00000000000000000000000000000000000045
++:10AB10000000000000000000000000000000000035
++:10AB20000000000000000000000000000000000025
++:10AB30000000000000000000000000000000000015
++:10AB40000000000000000000000000000000000005
++:10AB500000000000000000000000000000000000F5
++:10AB600000000000000000000000000000000000E5
++:10AB700000000000000000000000000000000000D5
++:10AB800000000000000000000000000000000000C5
++:10AB900000000000000000000000000000000000B5
++:10ABA00000000000000000000000000000000000A5
++:10ABB0000000000000000000000000000000000095
++:10ABC0000000000000000000000000000000000085
++:10ABD0000000000000000000000000000000000075
++:10ABE0000000000000000000000000000000000065
++:10ABF0000000000000000000000000000000000055
++:10AC00000000000000000000000000000000000044
++:10AC10000000000000000000000000000000000034
++:10AC20000000000000000000000000000000000024
++:10AC30000000000000000000000000000000000014
++:10AC40000000000000000000000000000000000004
++:10AC500000000000000000000000000000000000F4
++:10AC600000000000000000000000000000000000E4
++:10AC700000000000000000000000000000000000D4
++:10AC800000000000000000000000000000000000C4
++:10AC900000000000000000000000000000000000B4
++:10ACA00000000000000000000000000000000000A4
++:10ACB0000000000000000000000000000000000094
++:10ACC0000000000000000000000000000000000084
++:10ACD0000000000000000000000000000000000074
++:10ACE0000000000000000000000000000000000064
++:10ACF0000000000000000000000000000000000054
++:10AD00000000000000000000000000000000000043
++:10AD10000000000000000000000000000000000033
++:10AD20000000000000000000000000000000000023
++:10AD30000000000000000000000000000000000013
++:10AD40000000000000000000000000000000000003
++:10AD500000000000000000000000000000000000F3
++:10AD600000000000000000000000000000000000E3
++:10AD700000000000000000000000000000000000D3
++:10AD800000000000000000000000000000000000C3
++:10AD900000000000000000000000000000000000B3
++:10ADA00000000000000000000000000000000000A3
++:10ADB0000000000000000000000000000000000093
++:10ADC0000000000000000000000000000000000083
++:10ADD0000000000000000000000000000000000073
++:10ADE0000000000000000000000000000000000063
++:10ADF0000000000000000000000000000000000053
++:10AE00000000000000000000000000000000000042
++:10AE10000000000000000000000000000000000032
++:10AE20000000000000000000000000000000000022
++:10AE30000000000000000000000000000000000012
++:10AE40000000000000000000000000000000000002
++:10AE500000000000000000000000000000000000F2
++:10AE600000000000000000000000000000000000E2
++:10AE700000000000000000000000000000000000D2
++:10AE800000000000000000000000000000000000C2
++:10AE900000000000000000000000000000000000B2
++:10AEA00000000000000000000000000000000000A2
++:10AEB0000000000000000000000000000000000092
++:10AEC0000000000000000000000000000000000082
++:10AED0000000000000000000000000000000000072
++:10AEE0000000000000000000000000000000000062
++:10AEF0000000000000000000000000000000000052
++:10AF00000000000000000000000000000000000041
++:10AF10000000000000000000000000000000000031
++:10AF20000000000000000000000000000000000021
++:10AF30000000000000000000000000000000000011
++:10AF40000000000000000000000000000000000001
++:10AF500000000000000000000000000000000000F1
++:10AF600000000000000000000000000000000000E1
++:10AF700000000000000000000000000000000000D1
++:10AF800000000000000000000000000000000000C1
++:10AF900000000000000000000000000000000000B1
++:10AFA00000000000000000000000000000000000A1
++:10AFB0000000000000000000000000000000000091
++:10AFC0000000000000000000000000000000000081
++:10AFD0000000000000000000000000000000000071
++:10AFE0000000000000000000000000000000000061
++:10AFF0000000000000000000000000000000000051
++:10B000000000000000000000000000000000000040
++:10B010000000000000000000000000000000000030
++:10B020000000000000000000000000000000000020
++:10B030000000000000000000000000000000000010
++:10B040000000000000000000000000000000000000
++:10B0500000000000000000000000000000000000F0
++:10B0600000000000000000000000000000000000E0
++:10B0700000000000000000000000000000000000D0
++:10B0800000000000000000000000000000000000C0
++:10B0900000000000000000000000000000000000B0
++:10B0A00000000000000000000000000000000000A0
++:10B0B0000000000000000000000000000000000090
++:10B0C0000000000000000000000000000000000080
++:10B0D0000000000000000000000000000000000070
++:10B0E0000000000000000000000000000000000060
++:10B0F0000000000000000000000000000000000050
++:10B10000000000000000000000000000000000003F
++:10B11000000000000000000000000000000000002F
++:10B12000000000000000000000000000000000001F
++:10B13000000000000000000000000000000000000F
++:10B1400000000000000000000000000000000000FF
++:10B1500000000000000000000000000000000000EF
++:10B1600000000000000000000000000000000000DF
++:10B1700000000000000000000000000000000000CF
++:10B1800000000000000000000000000000000000BF
++:10B1900000000000000000000000000000000000AF
++:10B1A000000000000000000000000000000000009F
++:10B1B000000000000000000000000000000000008F
++:10B1C000000000000000000000000000000000007F
++:10B1D000000000000000000000000000000000006F
++:10B1E000000000000000000000000000000000005F
++:10B1F000000000000000000000000000000000004F
++:10B20000000000000000000000000000000000003E
++:10B21000000000000000000000000000000000002E
++:10B22000000000000000000000000000000000001E
++:10B23000000000000000000000000000000000000E
++:10B2400000000000000000000000000000000000FE
++:10B2500000000000000000000000000000000000EE
++:10B2600000000000000000000000000000000000DE
++:10B2700000000000000000000000000000000000CE
++:10B2800000000000000000000000000000000000BE
++:10B2900000000000000000000000000000000000AE
++:10B2A000000000000000000000000000000000009E
++:10B2B000000000000000000000000000000000008E
++:10B2C000000000000000000000000000000000007E
++:10B2D000000000000000000000000000000000006E
++:10B2E000000000000000000000000000000000005E
++:10B2F000000000000000000000000000000000004E
++:10B30000000000000000000000000000000000003D
++:10B31000000000000000000000000000000000002D
++:10B32000000000000000000000000000000000001D
++:10B33000000000000000000000000000000000000D
++:10B3400000000000000000000000000000000000FD
++:10B3500000000000000000000000000000000000ED
++:10B3600000000000000000000000000000000000DD
++:10B3700000000000000000000000000000000000CD
++:10B3800000000000000000000000000000000000BD
++:10B3900000000000000000000000000000000000AD
++:10B3A000000000000000000000000000000000009D
++:10B3B000000000000000000000000000000000008D
++:10B3C000000000000000000000000000000000007D
++:10B3D000000000000000000000000000000000006D
++:10B3E000000000000000000000000000000000005D
++:10B3F000000000000000000000000000000000004D
++:10B40000000000000000000000000000000000003C
++:10B41000000000000000000000000000000000002C
++:10B42000000000000000000000000000000000001C
++:10B43000000000000000000000000000000000000C
++:10B4400000000000000000000000000000000000FC
++:10B4500000000000000000000000000000000000EC
++:10B4600000000000000000000000000000000000DC
++:10B4700000000000000000000000000000000000CC
++:10B4800000000000000000000000000000000000BC
++:10B4900000000000000000000000000000000000AC
++:10B4A000000000000000000000000000000000009C
++:10B4B000000000000000000000000000000000008C
++:10B4C000000000000000000000000000000000007C
++:10B4D000000000000000000000000000000000006C
++:10B4E000000000000000000000000000000000005C
++:10B4F000000000000000000000000000000000004C
++:10B50000000000000000000000000000000000003B
++:10B51000000000000000000000000000000000002B
++:10B52000000000000000000000000000000000001B
++:10B53000000000000000000000000000000000000B
++:10B5400000000000000000000000000000000000FB
++:10B5500000000000000000000000000000000000EB
++:10B5600000000000000000000000000000000000DB
++:10B5700000000000000000000000000000000000CB
++:10B5800000000000000000000000000000000000BB
++:10B5900000000000000000000000000000000000AB
++:10B5A000000000000000000000000000000000009B
++:10B5B000000000000000000000000000000000008B
++:10B5C000000000000000000000000000000000007B
++:10B5D000000000000000000000000000000000006B
++:10B5E000000000000000000000000000000000005B
++:10B5F000000000000000000000000000000000004B
++:10B60000000000000000000000000000000000003A
++:10B61000000000000000000000000000000000002A
++:10B62000000000000000000000000000000000001A
++:10B63000000000000000000000000000000000000A
++:10B6400000000000000000000000000000000000FA
++:10B6500000000000000000000000000000000000EA
++:10B6600000000000000000000000000000000000DA
++:10B6700000000000000000000000000000000000CA
++:10B6800000000000000000000000000000000000BA
++:10B6900000000000000000000000000000000000AA
++:10B6A000000000000000000000000000000000009A
++:10B6B000000000000000000000000000000000008A
++:10B6C000000000000000000000000000000000007A
++:10B6D000000000000000000000000000000000006A
++:10B6E000000000000000000000000000000000005A
++:10B6F000000000000000000000000000000000004A
++:10B700000000000000000000000000000000000039
++:10B710000000000000000000000000000000000029
++:10B720000000000000000000000000000000000019
++:10B730000000000000000000000000000000000009
++:10B7400000000000000000000000000000000000F9
++:10B7500000000000000000000000000000000000E9
++:10B7600000000000000000000000000000000000D9
++:10B7700000000000000000000000000000000000C9
++:10B7800000000000000000000000000000000000B9
++:10B7900000000000000000000000000000000000A9
++:10B7A0000000000000000000000000000000000099
++:10B7B0000000000000000000000000000000000089
++:10B7C0000000000000000000000000000000000079
++:10B7D0000000000000000000000000000000000069
++:10B7E0000000000000000000000000000000000059
++:10B7F0000000000000000000000000000000000049
++:10B800000000000000000000000000000000000038
++:10B810000000000000000000000000000000000028
++:10B820000000000000000000000000000000000018
++:10B830000000000000000000000000000000000008
++:10B8400000000000000000000000000000000000F8
++:10B8500000000000000000000000000000000000E8
++:10B8600000000000000000000000000000000000D8
++:10B8700000000000000000000000000000000000C8
++:10B8800000000000000000000000000000000000B8
++:10B8900000000000000000000000000000000000A8
++:10B8A0000000000000000000000000000000000098
++:10B8B0000000000000000000000000000000000088
++:10B8C0000000000000000000000000000000000078
++:10B8D0000000000000000000000000000000000068
++:10B8E0000000000000000000000000000000000058
++:10B8F0000000000000000000000000000000000048
++:10B900000000000000000000000000000000000037
++:10B910000000000000000000000000000000000027
++:10B920000000000000000000000000000000000017
++:10B930000000000000000000000000000000000007
++:10B9400000000000000000000000000000000000F7
++:10B9500000000000000000000000000000000000E7
++:10B9600000000000000000000000000000000000D7
++:10B9700000000000000000000000000000000000C7
++:10B9800000000000000000000000000000000000B7
++:10B9900000000000000000000000000000000000A7
++:10B9A0000000000000000000000000000000000097
++:10B9B0000000000000000000000000000000000087
++:10B9C0000000000000000000000000000000000077
++:10B9D0000000000000000000000000000000000067
++:10B9E0000000000000000000000000000000000057
++:10B9F0000000000000000000000000000000000047
++:10BA00000000000000000000000000000000000036
++:10BA10000000000000000000000000000000000026
++:10BA20000000000000000000000000000000000016
++:10BA30000000000000000000000000000000000006
++:10BA400000000000000000000000000000000000F6
++:10BA500000000000000000000000000000000000E6
++:10BA600000000000000000000000000000000000D6
++:10BA700000000000000000000000000000000000C6
++:10BA800000000000000000000000000000000000B6
++:10BA900000000000000000000000000000000000A6
++:10BAA0000000000000000000000000000000000096
++:10BAB0000000000000000000000000000000000086
++:10BAC0000000000000000000000000000000000076
++:10BAD0000000000000000000000000000000000066
++:10BAE0000000000000000000000000000000000056
++:10BAF0000000000000000000000000000000000046
++:10BB00000000000000000000000000000000000035
++:10BB10000000000000000000000000000000000025
++:10BB20000000000000000000000000000000000015
++:10BB30000000000000000000000000000000000005
++:10BB400000000000000000000000000000000000F5
++:10BB500000000000000000000000000000000000E5
++:10BB600000000000000000000000000000000000D5
++:10BB700000000000000000000000000000000000C5
++:10BB800000000000000000000000000000000000B5
++:10BB900000000000000000000000000000000000A5
++:10BBA0000000000000000000000000000000000095
++:10BBB0000000000000000000000000000000000085
++:10BBC0000000000000000000000000000000000075
++:10BBD0000000000000000000000000000000000065
++:10BBE0000000000000000000000000000000000055
++:10BBF0000000000000000000000000000000000045
++:10BC00000000000000000000000000000000000034
++:10BC10000000000000000000000000000000000024
++:10BC20000000000000000000000000000000000014
++:10BC30000000000000000000000000000000000004
++:10BC400000000000000000000000000000000000F4
++:10BC500000000000000000000000000000000000E4
++:10BC600000000000000000000000000000000000D4
++:10BC700000000000000000000000000000000000C4
++:10BC800000000000000000000000000000000000B4
++:10BC900000000000000000000000000000000000A4
++:10BCA0000000000000000000000000000000000094
++:10BCB0000000000000000000000000000000000084
++:10BCC0000000000000000000000000000000000074
++:10BCD0000000000000000000000000000000000064
++:10BCE0000000000000000000000000000000000054
++:10BCF0000000000000000000000000000000000044
++:10BD00000000000000000000000000000000000033
++:10BD10000000000000000000000000000000000023
++:10BD20000000000000000000000000000000000013
++:10BD30000000000000000000000000000000000003
++:10BD400000000000000000000000000000000000F3
++:10BD500000000000000000000000000000000000E3
++:10BD600000000000000000000000000000000000D3
++:10BD700000000000000000000000000000000000C3
++:10BD800000000000000000000000000000000000B3
++:10BD900000000000000000000000000000000000A3
++:10BDA0000000000000000000000000000000000093
++:10BDB0000000000000000000000000000000000083
++:10BDC0000000000000000000000000000000000073
++:10BDD0000000000000000000000000000000000063
++:10BDE0000000000000000000000000000000000053
++:10BDF0000000000000000000000000000000000043
++:10BE00000000000000000000000000000000000032
++:10BE10000000000000000000000000000000000022
++:10BE20000000000000000000000000000000000012
++:10BE30000000000000000000000000000000000002
++:10BE400000000000000000000000000000000000F2
++:10BE500000000000000000000000000000000000E2
++:10BE600000000000000000000000000000000000D2
++:10BE700000000000000000000000000000000000C2
++:10BE800000000000000000000000000000000000B2
++:10BE900000000000000000000000000000000000A2
++:10BEA0000000000000000000000000000000000092
++:10BEB0000000000000000000000000000000000082
++:10BEC0000000000000000000000000000000000072
++:10BED0000000000000000000000000000000000062
++:10BEE0000000000000000000000000000000000052
++:10BEF0000000000000000000000000000000000042
++:10BF00000000000000000000000000000000000031
++:10BF10000000000000000000000000000000000021
++:10BF20000000000000000000000000000000000011
++:10BF30000000000000000000000000000000000001
++:10BF400000000000000000000000000000000000F1
++:10BF500000000000000000000000000000000000E1
++:10BF600000000000000000000000000000000000D1
++:10BF700000000000000000000000000000000000C1
++:10BF800000000000000000000000000000000000B1
++:10BF900000000000000000000000000000000000A1
++:10BFA0000000000000000000000000000000000091
++:10BFB0000000000000000000000000000000000081
++:10BFC0000000000000000000000000000000000071
++:10BFD0000000000000000000000000000000000061
++:10BFE0000000000000000000000000000000000051
++:10BFF0000000000000000000000000000000000041
++:10C000000000000000000000000000000000000030
++:10C010000000000000000000000000000000000020
++:10C020000000000000000000000000000000000010
++:10C030000000000000000000000000000000000000
++:10C0400000000000000000000000000000000000F0
++:10C0500000000000000000000000000000000000E0
++:10C0600000000000000000000000000000000000D0
++:10C0700000000000000000000000000000000000C0
++:10C0800000000000000000000000000000000000B0
++:10C0900000000000000000000000000000000000A0
++:10C0A0000000000000000000000000000000000090
++:10C0B0000000000000000000000000000000000080
++:10C0C0000000000000000000000000000000000070
++:10C0D0000000000000000000000000000000000060
++:10C0E0000000000000000000000000000000000050
++:10C0F0000000000000000000000000000000000040
++:10C10000000000000000000000000000000000002F
++:10C11000000000000000000000000000000000001F
++:10C12000000000000000000000000000000000000F
++:10C1300000000000000000000000000000000000FF
++:10C1400000000000000000000000000000000000EF
++:10C1500000000000000000000000000000000000DF
++:10C1600000000000000000000000000000000000CF
++:10C1700000000000000000000000000000000000BF
++:10C1800000000000000000000000000000000000AF
++:10C19000000000000000000000000000000000009F
++:10C1A000000000000000000000000000000000008F
++:10C1B000000000000000000000000000000000007F
++:10C1C000000000000000000000000000000000006F
++:10C1D000000000000000000000000000000000005F
++:10C1E000000000000000000000000000000000004F
++:10C1F000000000000000000000000000000000003F
++:10C20000000000000000000000000000000000002E
++:10C21000000000000000000000000000000000001E
++:10C22000000000000000000000000000000000000E
++:10C2300000000000000000000000000000000000FE
++:10C2400000000000000000000000000000000000EE
++:10C2500000000000000000000000000000000000DE
++:10C2600000000000000000000000000000000000CE
++:10C2700000000000000000000000000000000000BE
++:10C2800000000000000000000000000000000000AE
++:10C29000000000000000000000000000000000009E
++:10C2A000000000000000000000000000000000008E
++:10C2B000000000000000000000000000000000007E
++:10C2C000000000000000000000000000000000006E
++:10C2D000000000000000000000000000000000005E
++:10C2E000000000000000000000000000000000004E
++:10C2F000000000000000000000000000000000003E
++:10C30000000000000000000000000000000000002D
++:10C31000000000000000000000000000000000001D
++:10C32000000000000000000000000000000000000D
++:10C3300000000000000000000000000000000000FD
++:10C3400000000000000000000000000000000000ED
++:10C3500000000000000000000000000000000000DD
++:10C3600000000000000000000000000000000000CD
++:10C3700000000000000000000000000000000000BD
++:10C3800000000000000000000000000000000000AD
++:10C39000000000000000000000000000000000009D
++:10C3A000000000000000000000000000000000008D
++:10C3B000000000000000000000000000000000007D
++:10C3C000000000000000000000000000000000006D
++:10C3D000000000000000000000000000000000005D
++:10C3E000000000000000000000000000000000004D
++:10C3F000000000000000000000000000000000003D
++:10C40000000000000000000000000000000000002C
++:10C41000000000000000000000000000000000001C
++:10C42000000000000000000000000000000000000C
++:10C4300000000000000000000000000000000000FC
++:10C4400000000000000000000000000000000000EC
++:10C4500000000000000000000000000000000000DC
++:10C4600000000000000000000000000000000000CC
++:10C4700000000000000000000000000000000000BC
++:10C4800000000000000000000000000000000000AC
++:10C49000000000000000000000000000000000009C
++:10C4A000000000000000000000000000000000008C
++:10C4B000000000000000000000000000000000007C
++:10C4C000000000000000000000000000000000006C
++:10C4D000000000000000000000000000000000005C
++:10C4E000000000000000000000000000000000004C
++:10C4F000000000000000000000000000000000003C
++:10C50000000000000000000000000000000000002B
++:10C51000000000000000000000000000000000001B
++:10C52000000000000000000000000000000000000B
++:10C5300000000000000000000000000000000000FB
++:10C5400000000000000000000000000000000000EB
++:10C5500000000000000000000000000000000000DB
++:10C5600000000000000000000000000000000000CB
++:10C5700000000000000000000000000000000000BB
++:10C5800000000000000000000000000000000000AB
++:10C59000000000000000000000000000000000009B
++:10C5A000000000000000000000000000000000008B
++:10C5B000000000000000000000000000000000007B
++:10C5C000000000000000000000000000000000006B
++:10C5D000000000000000000000000000000000005B
++:10C5E000000000000000000000000000000000004B
++:10C5F000000000000000000000000000000000003B
++:10C60000000000000000000000000000000000002A
++:10C61000000000000000000000000000000000001A
++:10C62000000000000000000000000000000000000A
++:10C6300000000000000000000000000000000000FA
++:10C6400000000000000000000000000000000000EA
++:10C6500000000000000000000000000000000000DA
++:10C6600000000000000000000000000000000000CA
++:10C6700000000000000000000000000000000000BA
++:10C6800000000000000000000000000000000000AA
++:10C69000000000000000000000000000000000009A
++:10C6A000000000000000000000000000000000008A
++:10C6B000000000000000000000000000000000007A
++:10C6C000000000000000000000000000000000006A
++:10C6D000000000000000000000000000000000005A
++:10C6E000000000000000000000000000000000004A
++:10C6F000000000000000000000000000000000003A
++:10C700000000000000000000000000000000000029
++:10C710000000000000000000000000000000000019
++:10C720000000000000000000000000000000000009
++:10C7300000000000000000000000000000000000F9
++:10C7400000000000000000000000000000000000E9
++:10C7500000000000000000000000000000000000D9
++:10C7600000000000000000000000000000000000C9
++:10C7700000000000000000000000000000000000B9
++:10C7800000000000000000000000000000000000A9
++:10C790000000000000000000000000000000000099
++:10C7A0000000000000000000000000000000000089
++:10C7B0000000000000000000000000000000000079
++:10C7C0000000000000000000000000000000000069
++:10C7D0000000000000000000000000000000000059
++:10C7E0000000000000000000000000000000000049
++:10C7F0000000000000000000000000000000000039
++:10C800000000000000000000000000000000000028
++:10C810000000000000000000000000000000000018
++:10C820000000000000000000000000000000000008
++:10C8300000000000000000000000000000000000F8
++:10C8400000000000000000000000000000000000E8
++:10C8500000000000000000000000000000000000D8
++:10C8600000000000000000000000000000000000C8
++:10C8700000000000000000000000000000000000B8
++:10C8800000000000000000000000000000000000A8
++:10C890000000000000000000000000000000000098
++:10C8A0000000000000000000000000000000000088
++:10C8B0000000000000000000000000000000000078
++:10C8C0000000000000000000000000000000000068
++:10C8D0000000000000000000000000000000000058
++:10C8E0000000000000000000000000000000000048
++:10C8F0000000000000000000000000000000000038
++:10C900000000000000000000000000000000000027
++:10C910000000000000000000000000000000000017
++:10C920000000000000000000000000000000000007
++:10C9300000000000000000000000000000000000F7
++:10C9400000000000000000000000000000000000E7
++:10C9500000000000000000000000000000000000D7
++:10C9600000000000000000000000000000000000C7
++:10C9700000000000000000000000000000000000B7
++:10C9800000000000000000000000000000000000A7
++:10C990000000000000000000000000000000000097
++:10C9A0000000000000000000000000000000000087
++:10C9B0000000000000000000000000000000000077
++:10C9C0000000000000000000000000000000000067
++:10C9D0000000000000000000000000000000000057
++:10C9E0000000000000000000000000000000000047
++:10C9F0000000000000000000000000000000000037
++:10CA00000000000000000000000000000000000026
++:10CA10000000000000000000000000000000000016
++:10CA20000000000000000000000000000000000006
++:10CA300000000000000000000000000000000000F6
++:10CA400000000000000000000000000000000000E6
++:10CA500000000000000000000000000000000000D6
++:10CA600000000000000000000000000000000000C6
++:10CA700000000000000000000000000000000000B6
++:10CA800000000000000000000000000000000000A6
++:10CA90000000000000000000000000000000000096
++:10CAA0000000000000000000000000000000000086
++:10CAB0000000000000000000000000000000000076
++:10CAC0000000000000000000000000000000000066
++:10CAD0000000000000000000000000000000000056
++:10CAE0000000000000000000000000000000000046
++:10CAF0000000000000000000000000000000000036
++:10CB00000000000000000000000000000000000025
++:10CB10000000000000000000000000000000000015
++:10CB20000000000000000000000000000000000005
++:10CB300000000000000000000000000000000000F5
++:10CB400000000000000000000000000000000000E5
++:10CB500000000000000000000000000000000000D5
++:10CB600000000000000000000000000000000000C5
++:10CB700000000000000000000000000000000000B5
++:10CB800000000000000000000000000000000000A5
++:10CB90000000000000000000000000000000000095
++:10CBA0000000000000000000000000000000000085
++:10CBB0000000000000000000000000000000000075
++:10CBC0000000000000000000000000000000000065
++:10CBD0000000000000000000000000000000000055
++:10CBE0000000000000000000000000000000000045
++:10CBF0000000000000000000000000000000000035
++:10CC00000000000000000000000000000000000024
++:10CC10000000000000000000000000000000000014
++:10CC20000000000000000000000000000000000004
++:10CC300000000000000000000000000000000000F4
++:10CC400000000000000000000000000000000000E4
++:10CC500000000000000000000000000000000000D4
++:10CC600000000000000000000000000000000000C4
++:10CC700000000000000000000000000000000000B4
++:10CC800000000000000000000000000000000000A4
++:10CC90000000000000000000000000000000000094
++:10CCA0000000000000000000000000000000000084
++:10CCB0000000000000000000000000000000000074
++:10CCC0000000000000000000000000000000000064
++:10CCD0000000000000000000000000000000000054
++:10CCE0000000000000000000000000000000000044
++:10CCF0000000000000000000000000000000000034
++:10CD00000000000000000000000000000000000023
++:10CD10000000000000000000000000000000000013
++:10CD20000000000000000000000000000000000003
++:10CD300000000000000000000000000000000000F3
++:10CD400000000000000000000000000000000000E3
++:10CD500000000000000000000000000000000000D3
++:10CD600000000000000000000000000000000000C3
++:10CD700000000000000000000000000000000000B3
++:10CD800000000000000000000000000000000000A3
++:10CD90000000000000000000000000000000000093
++:10CDA0000000000000000000000000000000000083
++:10CDB0000000000000000000000000000000000073
++:10CDC0000000000000000000000000000000000063
++:10CDD0000000000000000000000000000000000053
++:10CDE0000000000000000000000000000000000043
++:10CDF0000000000000000000000000000000000033
++:10CE00000000000000000000000000000000000022
++:10CE10000000000000000000000000000000000012
++:10CE20000000000000000000000000000000000002
++:10CE300000000000000000000000000000000000F2
++:10CE400000000000000000000000000000000000E2
++:10CE500000000000000000000000000000000000D2
++:10CE600000000000000000000000000000000000C2
++:10CE700000000000000000000000000000000000B2
++:10CE800000000000000000000000000000000000A2
++:10CE90000000000000000000000000000000000092
++:10CEA0000000000000000000000000000000000082
++:10CEB0000000000000000000000000000000000072
++:10CEC0000000000000000000000000000000000062
++:10CED0000000000000000000000000000000000052
++:10CEE0000000000000000000000000000000000042
++:10CEF0000000000000000000000000000000000032
++:10CF00000000000000000000000000000000000021
++:10CF10000000000000000000000000000000000011
++:10CF20000000000000000000000000000000000001
++:10CF300000000000000000000000000000000000F1
++:10CF400000000000000000000000000000000000E1
++:10CF500000000000000000000000000000000000D1
++:10CF600000000000000000000000000000000000C1
++:10CF700000000000000000000000000000000000B1
++:10CF800000000000000000000000000000000000A1
++:10CF90000000000000000000000000000000000091
++:10CFA0000000000000000000000000000000000081
++:10CFB0000000000000000000000000000000000071
++:10CFC0000000000000000000000000000000000061
++:10CFD0000000000000000000000000000000000051
++:10CFE0000000000000000000000000000000000041
++:10CFF0000000000000000000000000000000000031
++:10D000000000000000000000000000000000000020
++:10D010000000000000000000000000000000000010
++:10D020000000000000000000000000000000000000
++:10D0300000000000000000000000000000000000F0
++:10D0400000000000000000000000000000000000E0
++:10D0500000000000000000000000000000000000D0
++:10D0600000000000000000000000000000000000C0
++:10D0700000000000000000000000000000000000B0
++:10D0800000000000000000000000000000000000A0
++:10D090000000000000000000000000000000000090
++:10D0A0000000000000000000000000000000000080
++:10D0B0000000000000000000000000000000000070
++:10D0C0000000000000000000000000000000000060
++:10D0D0000000000000000000000000000000000050
++:10D0E0000000000000000000000000000000000040
++:10D0F0000000000000000000000000000000000030
++:10D10000000000000000000000000000000000001F
++:10D11000000000000000000000000000000000000F
++:10D1200000000000000000000000000000000000FF
++:10D1300000000000000000000000000000000000EF
++:10D1400000000000000000000000000000000000DF
++:10D1500000000000000000000000000000000000CF
++:10D1600000000000000000000000000000000000BF
++:10D1700000000000000000000000000000000000AF
++:10D18000000000000000000000000000000000009F
++:10D19000000000000000000000000000000000008F
++:10D1A000000000000000000000000000000000007F
++:10D1B000000000000000000000000000000000006F
++:10D1C000000000000000000000000000000000005F
++:10D1D000000000000000000000000000000000004F
++:10D1E000000000000000000000000000000000003F
++:10D1F000000000000000000000000000000000002F
++:10D20000000000000000000000000000000000001E
++:10D21000000000000000000000000000000000000E
++:10D2200000000000000000000000000000000000FE
++:10D2300000000000000000000000000000000000EE
++:10D2400000000000000000000000000000000000DE
++:10D2500000000000000000000000000000000000CE
++:10D2600000000000000000000000000000000000BE
++:10D2700000000000000000000000000000000000AE
++:10D28000000000000000000000000000000000009E
++:10D29000000000000000000000000000000000008E
++:10D2A000000000000000000000000000000000007E
++:10D2B000000000000000000000000000000000006E
++:10D2C000000000000000000000000000000000005E
++:10D2D000000000000000000000000000000000004E
++:10D2E000000000000000000000000000000000003E
++:10D2F000000000000000000000000000000000002E
++:10D30000000000000000000000000000000000001D
++:10D31000000000000000000000000000000000000D
++:10D3200000000000000000000000000000000000FD
++:10D3300000000000000000000000000000000000ED
++:10D3400000000000000000000000000000000000DD
++:10D3500000000000000000000000000000000000CD
++:10D3600000000000000000000000000000000000BD
++:10D3700000000000000000000000000000000000AD
++:10D38000000000000000000000000000000000009D
++:10D39000000000000000000000000000000000008D
++:10D3A000000000000000000000000000000000007D
++:10D3B000000000000000000000000000000000006D
++:10D3C000000000000000000000000000000000005D
++:10D3D000000000000000000000000000000000004D
++:10D3E000000000000000000000000000000000003D
++:10D3F000000000000000000000000000000000002D
++:10D40000000000000000000000000000000000001C
++:10D41000000000000000000000000000000000000C
++:10D4200000000000000000000000000000000000FC
++:10D4300000000000000000000000000000000000EC
++:10D4400000000000000000000000000000000000DC
++:10D4500000000000000000000000000000000000CC
++:10D4600000000000000000000000000000000000BC
++:10D4700000000000000000000000000000000000AC
++:10D48000000000000000000000000000000000009C
++:10D49000000000000000000000000000000000008C
++:10D4A000000000000000000000000000000000007C
++:10D4B000000000000000000000000000000000006C
++:10D4C000000000000000000000000000000000005C
++:10D4D000000000000000000000000000000000004C
++:10D4E000000000000000000000000000000000003C
++:10D4F000000000000000000000000000000000002C
++:10D50000000000000000000000000000000000001B
++:10D51000000000000000000000000000000000000B
++:10D5200000000000000000000000000000000000FB
++:10D5300000000000000000000000000000000000EB
++:10D5400000000000000000000000000000000000DB
++:10D5500000000000000000000000000000000000CB
++:10D5600000000000000000000000000000000000BB
++:10D5700000000000000000000000000000000000AB
++:10D58000000000000000000000000000000000009B
++:10D59000000000000000008000000000000000000B
++:10D5A000000000000000000000000000000000007B
++:10D5B00000000000000000000000000A0000000061
++:10D5C0000000000000000000100000030000000048
++:10D5D0000000000D0000000D3C02080024427340D2
++:10D5E0003C030800246377CCAC4000000043202BB0
++:10D5F0001480FFFD244200043C1D080037BD7FFC61
++:10D6000003A0F0213C100800261032103C1C08003A
++:10D61000279C73400E0010FE000000000000000D6B
++:10D6200030A5FFFF30C600FF274301808F4201B8BD
++:10D630000440FFFE24020002AC640000A465000860
++:10D64000A066000AA062000B3C021000AC67001844
++:10D6500003E00008AF4201B83C0360008C624FF861
++:10D660000440FFFE3C020200AC644FC0AC624FC4F9
++:10D670003C02100003E00008AC624FF89482000CFA
++:10D680002486001400A0382100021302000210803A
++:10D690000082402100C8102B1040005700000000FD
++:10D6A00090C300002C6200095040005190C200015C
++:10D6B000000310803C030800246372F00043102133
++:10D6C0008C420000004000080000000090C30001F0
++:10D6D0002402000A1462003A000000000106102330
++:10D6E0002C42000A1440003624C600028CE20000DE
++:10D6F00034420100ACE2000090C2000090C300017F
++:10D7000090C4000290C5000300031C000002160034
++:10D710000043102500042200004410250045102578
++:10D7200024C60004ACE2000490C2000090C30001D3
++:10D7300090C4000290C500030002160000031C0004
++:10D740000043102500042200004410250045102548
++:10D7500024C600040A000CB8ACE2000890C3000123
++:10D76000240200041462001624C6000290C20000C5
++:10D7700090C400018CE30000000212000044102558
++:10D780003463000424C60002ACE2000C0A000CB8AA
++:10D79000ACE3000090C300012402000314620008FF
++:10D7A00024C600028CE2000090C3000024C60001E1
++:10D7B00034420008A0E300100A000CB8ACE20000FC
++:10D7C00003E000082402000190C3000124020002CB
++:10D7D0001062000224C40002010020210A000CB8DB
++:10D7E000008030210A000CB824C6000190C200015C
++:10D7F0000A000CB800C2302103E00008000010212C
++:10D8000027BDFFE8AFBF0014AFB000100E00130239
++:10D8100000808021936200052403FFFE0200202186
++:10D82000004310248FBF00148FB00010A3620005C6
++:10D830000A00130B27BD001827BDFFE8AFB000108A
++:10D84000AFBF00140E000F3C0080802193620000E7
++:10D8500024030050304200FF14430004240201005E
++:10D86000AF4201800A000D3002002021AF4001804C
++:10D87000020020218FBF00148FB000100A000FE7B4
++:10D8800027BD001827BDFF80AFBE0078AFB700747A
++:10D89000AFB20060AFBF007CAFB60070AFB5006C38
++:10D8A000AFB40068AFB30064AFB1005CAFB0005874
++:10D8B0008F5001283C0208008C4231A02403FF80D5
++:10D8C0009365003F0202102100431024AF42002460
++:10D8D0003C0208008C4231A09364000530B200FF86
++:10D8E000020210213042007F034218210004202749
++:10D8F0003C02000A0062182130840001AF8300144A
++:10D900000000F0210000B82114800053AFA00050A7
++:10D9100093430116934401128F450104306300FFC5
++:10D920003C020001308400FF00A2282403431021A0
++:10D9300003441821245640002467400014A001CD60
++:10D940002402000193620000304300FF2402002003
++:10D950001062000524020050106200060000000062
++:10D960000A000D74000000000000000D0A000D7D8B
++:10D97000AFA000303C1E080027DE738C0A000D7D2E
++:10D98000AFA000303C0208008C4200DC24420001C1
++:10D990003C010800AC2200DC0E00139F00000000D8
++:10D9A0000A000F318FBF007C8F4201043C0300202E
++:10D9B00092D3000D004310240002202B00042140CC
++:10D9C000AFA400308F4301043C02004000621824E1
++:10D9D000146000023485004000802821326200205B
++:10D9E000AFA500301440000234A6008000A0302112
++:10D9F00010C0000BAFA6003093C500088F67004C25
++:10DA00000200202100052B0034A5008130A5F08103
++:10DA10000E000C9B30C600FF0A000F2E0000000015
++:10DA20009362003E304200401040000F2402000488
++:10DA300056420007240200120200202100E02821A3
++:10DA40000E0013F702C030210A000F318FBF007C97
++:10DA500016420005000000000E000D2100002021EC
++:10DA60000A000F318FBF007C9743011A96C4000E45
++:10DA700093620035326500043075FFFF00442004D6
++:10DA8000AFA400548ED1000410A000158ED400085D
++:10DA90009362003E3042004010400007000000004A
++:10DAA0000E0013E0022020211040000D00000000B5
++:10DAB0000A000F2E000000008F6200440222102393
++:10DAC0000440016A000000008F6200480222102317
++:10DAD00004410166240400160A000E218FC20004CE
++:10DAE0008F6200480222102304400008000000005A
++:10DAF0003C0208008C423100244200013C01080035
++:10DB0000AC2231000A000F23000000008F620040A9
++:10DB100002221023184000128F8400143C020800D7
++:10DB20008C423100327300FC0000A8212442000125
++:10DB30003C010800AC2231008F6300409482011C3C
++:10DB4000022318233042FFFF0043102A50400010E8
++:10DB50002402000C8F6200400A000DF20222102302
++:10DB60009483011C9762003C0043102B1040000678
++:10DB7000000000009482011C00551023A482011CA7
++:10DB80000A000DF72402000CA480011C2402000CE2
++:10DB9000AFA200308F620040005120231880000D9A
++:10DBA00002A4102A1440012600000000149500066B
++:10DBB00002A410233A620001304200011440012007
++:10DBC0000000000002A41023022488210A000E098C
++:10DBD0003055FFFF00002021326200021040001A81
++:10DBE000326200109362003E30420040504000110B
++:10DBF0008FC200040E00130202002021240200182C
++:10DC0000A362003F936200052403FFFE020020216F
++:10DC1000004310240E00130BA362000524040039F6
++:10DC2000000028210E0013C9240600180A000F3036
++:10DC300024020001240400170040F809000000003D
++:10DC40000A000F302402000110400108000000000B
++:10DC50008F63004C8F620054028210231C4001032A
++:10DC600002831023044200010060A021AFA4001829
++:10DC7000AFB10010AFB50014934201208F65004092
++:10DC80009763003C304200FF034210210044102102
++:10DC90008FA400543063FFFF244240000083182B00
++:10DCA0008FA40030AFA20020AFA50028008320255C
++:10DCB000AFA40030AFA50024AFA0002CAFB4003457
++:10DCC0009362003E30420008504000118FC20000B5
++:10DCD00002C0202127A500380E000CB2AFA00038EA
++:10DCE0005440000B8FC200008FA200383042010068
++:10DCF000504000078FC200008FA3003C8F6200607D
++:10DD00000062102304430001AF6300608FC2000073
++:10DD10000040F80927A400108FA200303042000212
++:10DD200054400001327300FE9362003E30420040D6
++:10DD3000104000378FA200248F6200541682001A10
++:10DD40003262000124020014124200102A4200151F
++:10DD500010400006240200162402000C12420007A4
++:10DD6000326200010A000E7D000000001242000530
++:10DD7000326200010A000E7D000000000A000E78E9
++:10DD80002417000E0A000E78241700100A000E7CDB
++:10DD900024170012936200232403FFBD00431024C4
++:10DDA000A362002332620001104000198FA20024F8
++:10DDB0002402000C1242000E2A42000D1040000600
++:10DDC0002402000E2402000A124200078FA200243F
++:10DDD0000A000E9524420001124200088FA200247E
++:10DDE0000A000E95244200010A000E932417000831
++:10DDF0002402000E16E20002241700162417001059
++:10DE00008FA2002424420001AFA200248FA200248C
++:10DE10008FA300148F76004000431021AF620040B2
++:10DE20008F8200149442011C104000090000000081
++:10DE30008F6200488F6400409763003C00441023C9
++:10DE40003063FFFF0043102A104000088FA20054E7
++:10DE5000936400368F6300403402FFFC008210049C
++:10DE600000621821AF6300488FA200548FA60030D3
++:10DE70000282902130C200081040000E0000000015
++:10DE80008F6200581642000430C600FF9742011A04
++:10DE90005040000134C6001093C500088FA700341D
++:10DEA0000200202100052B0034A500800E000C9BF1
++:10DEB00030A5F0808F620040005610231840001BF0
++:10DEC0008FA200183C0208008C42319830420010AA
++:10DED0001040000D24020001976200681440000AFF
++:10DEE000240200018F8200149442011C1440000699
++:10DEF00024020001A76200689742007A244200646D
++:10DF00000A000EE9A7620012A76200120E001302B7
++:10DF1000020020219362007D2403000102002021E1
++:10DF2000344200010A000EE7AFA300501840000A77
++:10DF3000000000000E001302020020219362007D09
++:10DF40002403000102002021AFA30050344200044A
++:10DF50000E00130BA362007D9362003E304200402E
++:10DF60001440000C326200011040000A0000000062
++:10DF70008F6300408FC20004240400182463000152
++:10DF80000040F809AF6300408FA200300A000F3054
++:10DF9000304200048F620058105200100000000050
++:10DFA0008F620018022210231C4000082404000184
++:10DFB0008F62001816220009000000008F62001C0A
++:10DFC000028210230440000500000000AF720058D8
++:10DFD000AFA40050AF710018AF74001C12E0000B2A
++:10DFE0008FA200500E00130202002021A377003FF1
++:10DFF0000E00130B0200202102E030212404003720
++:10E000000E0013C9000028218FA200501040000309
++:10E01000000000000E000CA90200202112A0000543
++:10E02000000018218FA2003030420004504000113F
++:10E0300000601021240300010A000F30006010214D
++:10E040000E001302020020219362007D02002021B5
++:10E05000344200040E00130BA362007D0E000CA9D5
++:10E06000020020210A000F3024020001AF400044CA
++:10E07000240200018FBF007C8FBE00788FB7007430
++:10E080008FB600708FB5006C8FB400688FB30064DA
++:10E090008FB200608FB1005C8FB0005803E00008C1
++:10E0A00027BD00808F4201B80440FFFE2402080013
++:10E0B000AF4201B803E00008000000003C02000885
++:10E0C00003421021944200483084FFFF2484001250
++:10E0D0003045FFFF10A0001700A4102B10400016C1
++:10E0E00024020003934201202403001AA343018B5E
++:10E0F000304200FF2446FFFE8F82000000A6182B4E
++:10E100003863000100021382004310241040000510
++:10E110008F84000434820001A746019403E00008C4
++:10E12000AF8200042402FFFE0082102403E00008F6
++:10E13000AF8200042402000303E00008A342018B25
++:10E1400027BDFFE0AFB10014AFB00010AFBF0018A3
++:10E1500030B0FFFF30D1FFFF8F4201B80440FFFE17
++:10E1600000000000AF440180AF4400200E000F42C9
++:10E17000020020218F8300008F840004A750019AA1
++:10E18000A750018EA74301908F8300083082800042
++:10E19000AF4301A8A75101881040000E8F820004F0
++:10E1A00093420116304200FC24420004005A102120
++:10E1B0008C4240003042FFFF144000068F82000472
++:10E1C0003C02FFFF34427FFF00821024AF82000434
++:10E1D0008F8200042403BFFF00431024A74201A63E
++:10E1E0009743010C8F42010400031C003042FFFFE3
++:10E1F00000621825AF4301AC3C021000AF4201B8E9
++:10E200008FBF00188FB100148FB0001003E000081A
++:10E2100027BD00208F470070934201128F830000BA
++:10E2200027BDFFF0304200FF00022882306201006B
++:10E23000000030211040004324A40003306240005D
++:10E24000104000103062200000041080005A10219D
++:10E250008C43400024A4000400041080AFA30000FD
++:10E26000005A10218C424000AFA2000493420116D4
++:10E27000304200FC005A10218C4240000A000FC0BE
++:10E28000AFA200081040002F0000302100041080D1
++:10E29000005A10218C43400024A400040004108084
++:10E2A000AFA30000005A10218C424000AFA000082C
++:10E2B000AFA200048FA80008000030210000202138
++:10E2C000240A00083C0908002529010003A41021A4
++:10E2D000148A000300042A001100000A0000000054
++:10E2E00090420000248400012C83000C00A2102125
++:10E2F00000021080004910218C4200001460FFF3DE
++:10E3000000C230263C0408008C8431048F42007027
++:10E310002C83002010600009004738233C030800CC
++:10E32000246331080004108000431021248300017D
++:10E33000AC4700003C010800AC233104AF86000864
++:10E340002406000100C0102103E0000827BD0010D2
++:10E350003C0208008C42003827BDFFD0AFB5002436
++:10E36000AFB40020AFB10014AFBF0028AFB3001CA2
++:10E37000AFB20018AFB00010000088213C150800B3
++:10E3800026B50038144000022454FFFF0000A021ED
++:10E390009742010E8F8400003042FFFF308340001F
++:10E3A0001060000A245200043C0200200082102465
++:10E3B00050400007308280008F8200042403BFFF9A
++:10E3C000008318240A0010103442100030828000AC
++:10E3D0001040000A3C020020008210241040000778
++:10E3E0008F8200043C03FFFF34637FFF0083182407
++:10E3F00034428000AF820004AF8300000E000F980B
++:10E400000000000014400007000000009743011EB8
++:10E410009742011C3063FFFF0002140000621825C0
++:10E42000AF8300089742010C8F4340003045FFFF47
++:10E430003402FFFF14620003000000000A001028ED
++:10E44000241100208F42400030420100544000015E
++:10E45000241100108F8400003082100050400014FE
++:10E4600036310001308200201440000B3C021000C5
++:10E47000008210245040000E363100013C030E0093
++:10E480003C020DFF008318243442FFFF0043102B91
++:10E4900050400007363100013C0208008C42002C3D
++:10E4A000244200013C010800AC22002C363100055A
++:10E4B0003C0608008CC6003454C000238F85000041
++:10E4C0008F820004304240005440001F8F850000BE
++:10E4D0003C021F01008210243C0310005443001A28
++:10E4E0008F85000030A20200144000178F850000C5
++:10E4F0003250FFFF363100028F4201B80440FFFE68
++:10E5000000000000AF400180020020210E000F42F9
++:10E51000AF4000208F8300042402BFFFA750019A60
++:10E52000006218248F820000A750018EA751018835
++:10E53000A74301A6A74201903C021000AF4201B8D8
++:10E540000A0010F5000010213C02100000A2102467
++:10E550001040003A0000000010C0000F0000000052
++:10E5600030A201001040000C3C0302003C020F00EE
++:10E5700000A2102410430008000000008F82000851
++:10E58000005410240055102190420004244200043D
++:10E590000A00109F000221C00000000000051602C2
++:10E5A0003050000F3A0300022E4203EF38420001C0
++:10E5B0002C6300010062182414600073240200011F
++:10E5C0003C0308008C6300D02E06000C386200016A
++:10E5D0002C4200010046102414400015001021C0F8
++:10E5E0002602FFFC2C4200045440001100002021B0
++:10E5F000386200022C420001004610241040000343
++:10E60000000512420A00109F000020210010182B64
++:10E610000043102450400006001021C000002021BB
++:10E620003245FFFF0E000F633226FFFB001021C0B2
++:10E630003245FFFF0A0010F2362600028F424000EA
++:10E640003C0308008C630024304201001040004667
++:10E6500030620001322200043070000D14400002CC
++:10E660002413000424130002000512C238420001E2
++:10E670002E4303EF304200013863000100431025B0
++:10E68000104000033231FFFB2402FFFB0202802412
++:10E6900010C000183202000130A201001040001525
++:10E6A000320200013C020F0000A210243C030200D1
++:10E6B0001043000F8F8200082403FFFE0203802412
++:10E6C00000541024005510219042000402333025DC
++:10E6D0002442000412000002000221C03226FFFF83
++:10E6E0000E000F633245FFFF1200002700001021CB
++:10E6F000320200011040000D320200042402000129
++:10E7000012020002023330253226FFFF00002021D2
++:10E710000E000F633245FFFF2402FFFE0202802439
++:10E7200012000019000010213202000410400016EF
++:10E7300024020001240200041202000202333025E8
++:10E740003226FFFF3245FFFF0E000F632404010055
++:10E750002402FFFB020280241200000B00001021A3
++:10E760000A0010F5240200011040000700001021EB
++:10E770003245FFFF36260002000020210E000F6305
++:10E7800000000000000010218FBF00288FB500247A
++:10E790008FB400208FB3001C8FB200188FB100140B
++:10E7A0008FB0001003E0000827BD003027BDFFD068
++:10E7B000AFB000103C04600CAFBF002CAFB6002817
++:10E7C000AFB50024AFB40020AFB3001CAFB2001847
++:10E7D000AFB100148C8250002403FF7F3C1A8000EC
++:10E7E000004310243442380CAC8250002402000351
++:10E7F0003C106000AF4200088E0208083C1B8008F5
++:10E800003C010800AC2000203042FFF038420010EC
++:10E810002C4200010E001B8DAF8200183C04FFFF4C
++:10E820003C020400348308063442000CAE0219484E
++:10E83000AE03194C3C0560168E0219808CA30000B3
++:10E840003442020000641824AE0219803C02535383
++:10E850001462000334A47C008CA200040050202128
++:10E860008C82007C8C830078AF820010AF83000C18
++:10E870008F55000032A200031040FFFD32A20001BC
++:10E880001040013D32A200028F420128AF42002019
++:10E890008F4201048F430100AF8200000E000F3C45
++:10E8A000AF8300043C0208008C4200C01040000806
++:10E8B0008F8400003C0208008C4200C42442000106
++:10E8C0003C010800AC2200C40A00126900000000EC
++:10E8D0003C020010008210241440010C8F830004BD
++:10E8E0003C0208008C4200203C0308008C63003886
++:10E8F00000008821244200013C010800AC220020D5
++:10E900003C16080026D60038146000022474FFFF6D
++:10E910000000A0219742010E308340003042FFFFEB
++:10E920001060000A245200043C02002000821024DF
++:10E9300050400007308280008F8200042403BFFF14
++:10E94000008318240A0011703442100030828000C5
++:10E950001040000A3C0200200082102410400007F2
++:10E960008F8200043C03FFFF34637FFF0083182481
++:10E9700034428000AF820004AF8300000E000F9885
++:10E980000000000014400007000000009743011E33
++:10E990009742011C3063FFFF00021400006218253B
++:10E9A000AF8300089742010C8F4340003045FFFFC2
++:10E9B0003402FFFF14620003000000000A00118807
++:10E9C000241100208F4240003042010054400001D9
++:10E9D000241100108F840000308210005040001479
++:10E9E00036310001308200201440000B3C02100040
++:10E9F000008210245040000E363100013C030E000E
++:10EA00003C020DFF008318243442FFFF0043102B0B
++:10EA100050400007363100013C0208008C42002CB7
++:10EA2000244200013C010800AC22002C36310005D4
++:10EA30003C0608008CC6003454C000238F850000BB
++:10EA40008F820004304240005440001F8F85000038
++:10EA50003C021F01008210243C0310005443001AA2
++:10EA60008F85000030A20200144000178F8500003F
++:10EA70003250FFFF363100028F4201B80440FFFEE2
++:10EA800000000000AF400180020020210E000F4274
++:10EA9000AF4000208F8300042402BFFFA750019ADB
++:10EAA000006218248F820000A750018EA7510188B0
++:10EAB000A74301A6A74201903C021000AF4201B853
++:10EAC0000A001267000010213C02100000A210246E
++:10EAD0001040003A0000000010C0000F00000000CD
++:10EAE00030A201001040000C3C0302003C020F0069
++:10EAF00000A2102410430008000000008F820008CC
++:10EB000000541024005610219042000424420004B6
++:10EB10000A0011FF000221C00000000000051602DB
++:10EB20003050000F3A0300022E4203EF384200013A
++:10EB30002C63000100621824146000852402000187
++:10EB40003C0308008C6300D02E06000C38620001E4
++:10EB50002C4200010046102414400015001021C072
++:10EB60002602FFFC2C42000454400011000020212A
++:10EB7000386200022C42000100461024504000037D
++:10EB8000000512420A0011FF000020210010182B7E
++:10EB90000043102450400006001021C00000202136
++:10EBA0003245FFFF0E000F633226FFFB001021C02D
++:10EBB0003245FFFF0A001252362600028F42400003
++:10EBC0003C0308008C6300243042010010400046E2
++:10EBD00030620001322200043070000D1440000247
++:10EBE0002413000424130002000512C2384200015D
++:10EBF0002E4303EF3042000138630001004310252B
++:10EC0000104000033231FFFB2402FFFB020280248C
++:10EC100010C000183202000130A20100104000159F
++:10EC2000320200013C020F0000A210243C0302004B
++:10EC30001043000F8F8200082403FFFE020380248C
++:10EC40000054102400561021904200040233302555
++:10EC50002442000412000002000221C03226FFFFFD
++:10EC60000E000F633245FFFF120000390000102133
++:10EC7000320200011040000D3202000424020001A3
++:10EC800012020002023330253226FFFF000020214D
++:10EC90000E000F633245FFFF2402FFFE02028024B4
++:10ECA0001200002B00001021320200041040002846
++:10ECB0002402000124020004120200020233302563
++:10ECC0003226FFFF3245FFFF0E000F6324040100D0
++:10ECD0002402FFFB020280241200001D000010210C
++:10ECE0000A001267240200015040001900001021A0
++:10ECF0003245FFFF36260002000020210E000F6380
++:10ED0000000000000A001267000010212402BFFF6B
++:10ED1000006210241040000800000000240287FF59
++:10ED200000621024144000083C020060008210249D
++:10ED300010400005000000000E000D34000000002F
++:10ED40000A001267000000000E0012C70000000059
++:10ED5000104000063C0240008F4301243C0260202A
++:10ED6000AC430014000000003C024000AF420138F8
++:10ED70000000000032A200021040FEBD00000000B2
++:10ED80008F4201403C044000AF4200208F430148C5
++:10ED90003C02700000621824106400420000000071
++:10EDA0000083102B144000063C0260003C0220004F
++:10EDB000106200073C0240000A0012C3000000007D
++:10EDC0001062003C3C0240000A0012C30000000038
++:10EDD0008F4501408F4601448F42014800021402D2
++:10EDE000304300FF240200041462000A274401801B
++:10EDF0008F4201B80440FFFE2402001CAC850000D5
++:10EE0000A082000B3C021000AF4201B80A0012C3FE
++:10EE10003C0240002402000914620012000616029F
++:10EE2000000229C0AF4500208F4201B80440FFFE18
++:10EE30002402000124030003AF450180A343018B9A
++:10EE4000A740018EA740019AA7400190AF4001A8BA
++:10EE5000A7420188A74201A6AF4001AC3C021000C6
++:10EE6000AF4201B88F4201B80440FFFE000000002D
++:10EE7000AC8500008F42014800021402A482000801
++:10EE800024020002A082000B8F420148A4820010DD
++:10EE90003C021000AC860024AF4201B80A0012C345
++:10EEA0003C0240000E001310000000000A0012C3D4
++:10EEB0003C0240000E001BC2000000003C0240006B
++:10EEC000AF420178000000000A00112F000000008E
++:10EED0008F4201003042003E144000112402000124
++:10EEE000AF4000488F420100304207C0104000058B
++:10EEF00000000000AF40004CAF40005003E00008AD
++:10EF000024020001AF400054AF4000408F42010096
++:10EF10003042380054400001AF4000442402000158
++:10EF200003E00008000000008F4201B80440FFFE2B
++:10EF300024020001AF440180AF400184A74501884D
++:10EF4000A342018A24020002A342018B9742014A94
++:10EF500014C00004A7420190AF4001A40A0012EFC0
++:10EF60003C0210008F420144AF4201A43C02100059
++:10EF7000AF4001A803E00008AF4201B88F4201B8DA
++:10EF80000440FFFE24020002AF440180AF4401842C
++:10EF9000A7450188A342018AA342018B9742014AF7
++:10EFA000A7420190AF4001A48F420144AF4201A8A3
++:10EFB0003C02100003E00008AF4201B83C029000A0
++:10EFC0003442000100822025AF4400208F420020FF
++:10EFD0000440FFFE0000000003E000080000000005
++:10EFE0003C028000344200010082202503E000083A
++:10EFF000AF44002027BDFFE8AFBF0014AFB0001042
++:10F000008F50014093430149934201489344014882
++:10F01000306300FF304200FF00021200006228252A
++:10F020002402001910620076308400802862001AE1
++:10F030001040001C24020020240200081062007707
++:10F04000286200091040000E2402000B2402000177
++:10F0500010620034286200025040000524020006BD
++:10F0600050600034020020210A00139A00000000C2
++:10F0700010620030020020210A00139A00000000F4
++:10F080001062003B2862000C504000022402000E77
++:10F090002402000910620056020020210A00139A7F
++:10F0A0000000000010620056286200211040000F8E
++:10F0B000240200382402001C106200582862001D3F
++:10F0C000104000062402001F2402001B1062004CA6
++:10F0D000000000000A00139A000000001062004ABD
++:10F0E000020020210A00139A00000000106200456F
++:10F0F0002862003910400007240200802462FFCB00
++:10F100002C42000210400045020020210A00139604
++:10F110000000302110620009000000000A00139A6C
++:10F12000000000001480003D020020210A0013901E
++:10F130008FBF00140A001396240600018F4201B805
++:10F140000440FFFE24020002A342018BA745018870
++:10F150009742014AA74201908F420144A74201927F
++:10F160003C021000AF4201B80A00139C8FBF00148C
++:10F170009742014A144000290000000093620005F4
++:10F180003042000414400025000000000E0013026D
++:10F190000200202193620005020020213442000475
++:10F1A0000E00130BA36200059362000530420004B9
++:10F1B00014400002000000000000000D93620000F7
++:10F1C00024030020304200FF14430014000000001C
++:10F1D0008F4201B80440FFFE24020005AF500180B9
++:10F1E000A342018B3C0210000A00139AAF4201B8FF
++:10F1F0008FBF00148FB000100A0012F227BD001854
++:10F200000000000D02002021000030218FBF0014FB
++:10F210008FB000100A0012DD27BD00180000000D9D
++:10F220008FBF00148FB0001003E0000827BD001846
++:10F2300027BDFFE8AFBF00100E000F3C000000002C
++:10F24000AF4001808FBF0010000020210A000FE7AF
++:10F2500027BD00183084FFFF30A5FFFF00001821F4
++:10F260001080000700000000308200011040000202
++:10F2700000042042006518210A0013AB0005284055
++:10F2800003E000080060102110C0000624C6FFFF44
++:10F290008CA2000024A50004AC8200000A0013B573
++:10F2A0002484000403E000080000000010A000080F
++:10F2B00024A3FFFFAC860000000000000000000057
++:10F2C0002402FFFF2463FFFF1462FFFA248400047A
++:10F2D00003E0000800000000308300FF30A500FFBD
++:10F2E00030C600FF274701808F4201B80440FFFE6F
++:10F2F000000000008F42012834634000ACE20000AF
++:10F3000024020001ACE00004A4E30008A0E2000A2B
++:10F3100024020002A0E2000B3C021000A4E5001051
++:10F32000ACE00024ACE00028A4E6001203E00008F2
++:10F33000AF4201B827BDFFE8AFBF00109362003FA6
++:10F3400024030012304200FF1043000D00803021E2
++:10F350008F620044008210230440000A8FBF001017
++:10F360008F620048240400390000282100C21023C5
++:10F3700004410004240600120E0013C9000000001E
++:10F380008FBF00102402000103E0000827BD001811
++:10F3900027BDFFC8AFB20030AFB1002CAFBF003403
++:10F3A000AFB0002890C5000D0080902130A400105F
++:10F3B0001080000B00C088218CC300088F620054AD
++:10F3C0001062000730A20005144000B524040001BB
++:10F3D0000E000D21000020210A0014BB0040202156
++:10F3E00030A200051040000930A30012108000ACCC
++:10F3F000240400018E2300088F620054146200A9C7
++:10F400008FBF00340A00142C240400382402001298
++:10F41000146200A3240400010220202127A500106B
++:10F420000E000CB2AFA000101040001102402021CD
++:10F430008E220008AF620084AF6000400E0013020D
++:10F44000000000009362007D024020213442002031
++:10F450000E00130BA362007D0E000CA902402021B8
++:10F46000240400382405008D0A0014B82406001274
++:10F470009362003E304200081040000F8FA200103F
++:10F4800030420100104000078FA300148F6200601B
++:10F490000062102304430008AF6300600A001441B7
++:10F4A00000000000AF6000609362003E2403FFF79D
++:10F4B00000431024A362003E9362003E30420008E5
++:10F4C000144000022406000300003021936200343F
++:10F4D000936300378F640084304200FF306300FF85
++:10F4E00000661821000318800043282100A4202B67
++:10F4F0001080000B000000009763003C8F620084C6
++:10F500003063FFFF004510230062182B14600004D5
++:10F51000000000008F6200840A00145D0045802313
++:10F520009762003C3050FFFF8FA300103062000450
++:10F5300010400004000628808FA2001C0A001465F9
++:10F540000202102B2E02021850400003240202185F
++:10F550000A00146E020510233063000410600003DB
++:10F56000004510238FA2001C00451023004080217D
++:10F570002C42008054400001241000800E00130231
++:10F580000240202124020001AF62000C9362003E81
++:10F59000001020403042007FA362003E8E22000413
++:10F5A00024420001AF620040A770003C8F6200500F
++:10F5B0009623000E00431021AF6200588F62005066
++:10F5C00000441021AF62005C8E220004AF6200187C
++:10F5D0008E220008AF62001C8FA20010304200088B
++:10F5E0005440000A93A20020A360003693620036C4
++:10F5F0002403FFDFA36200359362003E0043102422
++:10F60000A362003E0A0014988E220008A36200350F
++:10F610008E220008AF62004C8F6200248F6300408E
++:10F6200000431021AF6200489362000024030050A1
++:10F63000304200FF144300122403FF803C02080004
++:10F640008C4231A00242102100431024AF42002816
++:10F650003C0208008C4231A08E2400083C03000CC0
++:10F66000024210213042007F03421021004310214A
++:10F67000AC4400D88E230008AF820014AC4300DCF9
++:10F680000E00130B02402021240400380000282122
++:10F690002406000A0E0013C9000000002404000123
++:10F6A0008FBF00348FB200308FB1002C8FB0002894
++:10F6B0000080102103E0000827BD003827BDFFF8B7
++:10F6C00027420180AFA20000308A00FF8F4201B8BC
++:10F6D0000440FFFE000000008F4601283C020800A5
++:10F6E0008C4231A02403FF80AF86004800C2102165
++:10F6F00000431024AF4200243C0208008C4231A099
++:10F700008FA900008FA8000000C210213042007FA6
++:10F71000034218213C02000A00621821946400D4BC
++:10F720008FA700008FA5000024020002AF83001401
++:10F73000A0A2000B8FA30000354260003084FFFFC1
++:10F74000A4E200083C021000AD260000AD04000455
++:10F75000AC60002427BD0008AF4201B803E00008F8
++:10F76000240200018F88003C938200288F830014BC
++:10F770003C07080024E7779800481023304200FF38
++:10F78000304900FC246500888F860040304A000321
++:10F790001120000900002021248200048CA3000015
++:10F7A000304400FF0089102AACE3000024A50004C7
++:10F7B0001440FFF924E70004114000090000202153
++:10F7C0002482000190A30000304400FF008A102B27
++:10F7D000A0E3000024A500011440FFF924E7000184
++:10F7E00030C20003144000048F85003C3102000346
++:10F7F0001040000D0000000010A0000900002021B2
++:10F800002482000190C30000304400FF0085102BCB
++:10F81000A0E3000024C600011440FFF924E7000122
++:10F8200003E00008000000001100FFFD000020219F
++:10F83000248200048CC30000304400FF0088102B99
++:10F84000ACE3000024C600041440FFF924E70004E0
++:10F8500003E00008000000008F83003C9382002832
++:10F8600030C600FF30A500FF00431023304300FFE7
++:10F870008F820014008038210043102114C0000240
++:10F88000244800880083382130E20003144000053A
++:10F8900030A2000314400003306200031040000D4A
++:10F8A0000000000010A000090000202124820001B7
++:10F8B00090E30000304400FF0085102BA1030000FE
++:10F8C00024E700011440FFF92508000103E00008C7
++:10F8D0000000000010A0FFFD000020212482000491
++:10F8E0008CE30000304400FF0085102BAD030000C6
++:10F8F00024E700041440FFF92508000403E0000891
++:10F90000000000000080482130AAFFFF30C600FF41
++:10F9100030E7FFFF274801808F4201B80440FFFE17
++:10F920008F820048AD0200008F420124AD02000426
++:10F930008D220020A5070008A102000A240200165B
++:10F94000A102000B934301208D2200088D240004A6
++:10F95000306300FF004310219783003A00441021D8
++:10F960008D250024004310233C0308008C6331A044
++:10F970008F840014A502000C246300E82402FFFF1A
++:10F98000A50A000EA5030010A5060012AD0500187B
++:10F99000AD020024948201142403FFF73042FFFFDC
++:10F9A000AD0200288C820118AD02002C3C02100030
++:10F9B000AD000030AF4201B88D220020004310247A
++:10F9C00003E00008AD2200208F82001430E7FFFF23
++:10F9D00000804821904200D330A5FFFF30C600FFD1
++:10F9E0000002110030420F0000E238252748018054
++:10F9F0008F4201B80440FFFE8F820048AD02000034
++:10FA00008F420124AD0200048D220020A5070008CA
++:10FA1000A102000A24020017A102000B9343012057
++:10FA20008D2200088D240004306300FF0043102164
++:10FA30009783003A004410218F8400140043102360
++:10FA40003C0308008C6331A0A502000CA505000E44
++:10FA5000246300E8A5030010A5060012AD00001401
++:10FA60008D220024AD0200188C82005CAD02001CC7
++:10FA70008C820058AD0200202402FFFFAD0200245A
++:10FA8000948200E63042FFFFAD02002894820060BD
++:10FA9000948300BE30427FFF3063FFFF00021200FC
++:10FAA00000431021AD02002C3C021000AD000030DC
++:10FAB000AF4201B8948200BE2403FFF700A21021D8
++:10FAC000A48200BE8D2200200043102403E0000821
++:10FAD000AD220020274301808F4201B80440FFFE81
++:10FAE0008F8200249442001C3042FFFF000211C0AC
++:10FAF000AC62000024020019A062000B3C0210005E
++:10FB0000AC60003003E00008AF4201B88F87002CE2
++:10FB100030C300FF8F4201B80440FFFE8F820048CF
++:10FB200034636000ACA2000093820044A0A20005F0
++:10FB30008CE20010A4A20006A4A300088C8200207E
++:10FB40002403FFF7A0A2000A24020002A0A2000BD7
++:10FB50008CE20000ACA200108CE20004ACA2001405
++:10FB60008CE2001CACA200248CE20020ACA2002895
++:10FB70008CE2002CACA2002C8C820024ACA20018D9
++:10FB80003C021000AF4201B88C82002000431024D8
++:10FB900003E00008AC8200208F86001427BDFFE838
++:10FBA000AFBF0014AFB0001090C20063304200201D
++:10FBB0001040000830A500FF8CC2007C2403FFDF4A
++:10FBC00024420001ACC2007C90C2006300431024B8
++:10FBD000A0C2006310A000238F830014275001806F
++:10FBE000020028210E0015D6240600828F82001400
++:10FBF000904200633042004050400019A38000440E
++:10FC00008F83002C8F4201B80440FFFE8F82004892
++:10FC1000AE02000024026082A60200082402000254
++:10FC2000A202000B8C620008AE0200108C62000C75
++:10FC3000AE0200148C620014AE0200188C62001830
++:10FC4000AE0200248C620024AE0200288C620028E0
++:10FC5000AE02002C3C021000AF4201B8A380004469
++:10FC60008F8300148FBF00148FB000109062006368
++:10FC700027BD00183042007FA06200639782003ADF
++:10FC80008F86003C8F850014938300280046102344
++:10FC9000A782003AA4A000E490A400638F820040F1
++:10FCA000AF83003C2403FFBF0046102100832024C3
++:10FCB000AF820040A0A400638F820014A04000BD6A
++:10FCC0008F82001403E00008A44000BE8F8A001455
++:10FCD00027BDFFE0AFB10014AFB000108F88003C2B
++:10FCE000AFBF00189389001C954200E430D100FF9B
++:10FCF0000109182B0080802130AC00FF3047FFFF46
++:10FD00000000582114600003310600FF012030215B
++:10FD1000010958239783003A0068102B1440003CD7
++:10FD20000000000014680007240200018E02002079
++:10FD30002403FFFB34E7800000431024AE020020C0
++:10FD40002402000134E70880158200053165FFFFB9
++:10FD50000E001554020020210A00169102002021F5
++:10FD60000E001585020020218F8400482743018062
++:10FD70008F4201B80440FFFE24020018AC6400006A
++:10FD8000A062000B8F840014948200E6A46200102D
++:10FD90003C021000AC600030AF4201B894820060B9
++:10FDA00024420001A4820060948200603C030800A9
++:10FDB0008C63318830427FFF5443000F02002021C2
++:10FDC000948200602403800000431024A482006019
++:10FDD0009082006090830060304200FF000211C2F8
++:10FDE00000021027000211C03063007F0062182556
++:10FDF000A083006002002021022028218FBF00186C
++:10FE00008FB100148FB000100A0015F927BD002033
++:10FE1000914200632403FF8000431025A142006348
++:10FE20009782003A3048FFFF110000209383001CA6
++:10FE30008F840014004B1023304600FF948300E4AD
++:10FE40002402EFFF0168282B00621824A48300E439
++:10FE500014A000038E020020010058210000302170
++:10FE60002403FFFB34E7800000431024AE0200208F
++:10FE700024020001158200053165FFFF0E001554B4
++:10FE8000020020210A0016B99783003A0E0015855A
++:10FE9000020020219783003A8F82003CA780003A1D
++:10FEA00000431023AF82003C9383001C8F82001418
++:10FEB0008FBF00188FB100148FB0001027BD002035
++:10FEC00003E00008A04300BD938200442403000126
++:10FED00027BDFFE8004330042C420020AFB00010E3
++:10FEE000AFBF00142410FFFE10400005274501801D
++:10FEF0003C0208008C4231900A0016D600461024BD
++:10FF00003C0208008C423194004610241440000743
++:10FF1000240600848F8300142410FFFF9062006287
++:10FF20003042000F34420040A06200620E0015D63D
++:10FF300000000000020010218FBF00148FB00010DD
++:10FF400003E0000827BD00188F83002427BDFFE0D1
++:10FF5000AFB20018AFB10014AFB00010AFBF001CBB
++:10FF60009062000D00A0902130D100FF3042007F50
++:10FF7000A062000D8F8500148E4300180080802140
++:10FF80008CA2007C146200052402000E90A2006383
++:10FF9000344200200A0016FFA0A200630E0016C51E
++:10FFA000A38200442403FFFF104300472404FFFF03
++:10FFB00052200045000020218E4300003C0200102A
++:10FFC00000621024504000043C020008020020217E
++:10FFD0000A00170E24020015006210245040000988
++:10FFE0008E45000002002021240200140E0016C5D8
++:10FFF000A38200442403FFFF104300332404FFFFC7
++:020000021000EC
++:100000008E4500003C02000200A2102410400016A1
++:100010003C0200048F8600248CC200148CC30010A4
++:100020008CC40014004310230044102B50400005E2
++:10003000020020218E43002C8CC2001010620003AD
++:10004000020020210A00173F240200123C02000493
++:1000500000A210245040001C00002021020020219A
++:100060000A00173F2402001300A2102410400006CB
++:100070008F8300248C620010504000130000202168
++:100080000A001739020020218C6200105040000441
++:100090008E42002C020020210A00173F240200118A
++:1000A00050400009000020210200202124020017F6
++:1000B0000E0016C5A38200442403FFFF1043000274
++:1000C0002404FFFF000020218FBF001C8FB2001806
++:1000D0008FB100148FB000100080102103E00008E1
++:1000E00027BD00208F83001427BDFFD8AFB40020A8
++:1000F000AFB3001CAFB20018AFB10014AFB0001026
++:10010000AFBF0024906200638F91002C2412FFFF88
++:100110003442004092250000A06200638E2200104D
++:100120000080982130B0003F105200060360A021EB
++:100130002402000D0E0016C5A38200441052005484
++:100140002404FFFF8F8300148E2200188C63007C30
++:1001500010430007026020212402000E0E0016C585
++:10016000A38200442403FFFF104300492404FFFF3F
++:1001700024040020120400048F83001490620063A2
++:1001800034420020A06200638F85003410A000205C
++:1001900000000000560400048F8200140260202139
++:1001A0000A0017902402000A9683000A9442006015
++:1001B0003042FFFF144300048F8200202404FFFD1F
++:1001C0000A0017B7AF82003C3C0208008C42318C19
++:1001D0000045102B14400006026020210000282159
++:1001E0000E001646240600010A0017B70000202161
++:1001F0002402002D0E0016C5A38200442403FFFF35
++:10020000104300232404FFFF0A0017B70000202139
++:10021000160400058F8400148E2300142402FFFFAF
++:100220005062001802602021948200602442000184
++:10023000A4820060948200603C0308008C633188D3
++:1002400030427FFF5443000F0260202194820060FF
++:100250002403800000431024A48200609082006088
++:1002600090830060304200FF000211C2000210279C
++:10027000000211C03063007F00621825A083006077
++:10028000026020210E0015F9240500010000202144
++:100290008FBF00248FB400208FB3001C8FB20018D2
++:1002A0008FB100148FB000100080102103E000080F
++:1002B00027BD00288F83001427BDFFE8AFB00010D2
++:1002C000AFBF0014906200638F87002C00808021F4
++:1002D000344200408CE60010A06200633C0308003A
++:1002E0008C6331B030C23FFF0043102B1040004EF2
++:1002F0008F8500302402FF8090A3000D004310245E
++:10030000304200FF504000490200202100061382C5
++:10031000304800032402000255020044020020215C
++:1003200094A2001C8F85001424030023A4A20114AE
++:100330008CE60000000616023042003F1043001019
++:100340003C0300838CE300188CA2007C1062000642
++:100350002402000E0E0016C5A38200442403FFFFF2
++:10036000104300382404FFFF8F8300149062006361
++:1003700034420020A06200630A0017FC8F8300242F
++:1003800000C31024144300078F83002490A200624E
++:100390003042000F34420020A0A20062A38800383F
++:1003A0008F8300249062000D3042007FA062000D18
++:1003B0008F83003410600018020020218F840030E9
++:1003C0008C8200100043102B1040000924020018FA
++:1003D000020020210E0016C5A38200442403FFFF63
++:1003E000104300182404FFFF0A00182400002021F5
++:1003F0008C820010240500010200202100431023FC
++:100400008F830024240600010E001646AC62001003
++:100410000A001824000020210E0015F9240500010F
++:100420000A00182400002021020020212402000DCF
++:100430008FBF00148FB0001027BD00180A0016C52A
++:10044000A38200448FBF00148FB0001000801021E1
++:1004500003E0000827BD001827BDFFC8AFB2002089
++:10046000AFBF0034AFB60030AFB5002CAFB400283A
++:10047000AFB30024AFB1001CAFB000188F46012805
++:100480003C0308008C6331A02402FF80AF86004843
++:1004900000C318213065007F03452821006218241D
++:1004A0003C02000AAF43002400A2282190A200626F
++:1004B00000809021AF850014304200FF000211023D
++:1004C000A382003890A200BC304200021440000217
++:1004D00024030034240300308F820014A3830028F7
++:1004E000938300388C4200C0A3800044AF82003C5C
++:1004F000240200041062031C8F84003C8E4400041C
++:10050000508003198F84003C8E4200103083FFFF1F
++:10051000A784003A106002FFAF8200408F8400146D
++:100520002403FF809082006300621024304200FFA9
++:10053000144002CF9785003A9383003824020002CA
++:1005400030B6FFFF14620005000088219382002866
++:100550002403FFFD0A001B19AF82003C8F82003C80
++:1005600002C2102B144002A18F8400400E0014EC34
++:1005700000000000938300283C040800248477983E
++:10058000240200341462002EAF84002C3C0A0800C0
++:100590008D4A77C82402FFFFAFA2001000803821E7
++:1005A0002405002F3C09080025297398240800FF22
++:1005B0002406FFFF90E2000024A3FFFF00062202B2
++:1005C00000C21026304200FF0002108000491021B6
++:1005D0008C420000306500FF24E7000114A8FFF5FD
++:1005E0000082302600061027AFA20014AFA2001030
++:1005F0000000282127A7001027A6001400C51023FB
++:100600009044000324A2000100A71821304500FFF8
++:100610002CA200041440FFF9A06400008FA2001077
++:100620001142000724020005024020210E0016C5D9
++:10063000A38200442403FFFF104300642404FFFF4F
++:100640003C0208009042779C104000098F82001401
++:10065000024020212402000C0E0016C5A382004493
++:100660002403FFFF104300592404FFFF8F8200146E
++:10067000A380001C3C0308008C63779C8C440080A2
++:100680003C0200FF3442FFFF006218240083202B4D
++:1006900010800008AF83003402402021240200199A
++:1006A0000E0016C5A38200442403FFFF1043004739
++:1006B0002404FFFF8F87003C9782003A8F85003427
++:1006C000AF8700200047202310A0003BA784003AFA
++:1006D0008F86001430A200030002102390C300BCD8
++:1006E0003050000300B0282100031882307300014D
++:1006F0000013108000A228213C0308008C6331A065
++:100700008F8200483084FFFF0085202B004310219A
++:1007100010800011244200888F84002C1082000E6B
++:100720003C033F013C0208008C42779800431024B0
++:100730003C0325001443000630E500FF8C820000D6
++:10074000ACC200888C8200100A0018E9ACC2009884
++:100750000E001529000030219382001C8F850014A3
++:100760008F830040020238218F82003CA387001C47
++:1007700094A400E4006218218F82003434841000B5
++:10078000AF83004000503021A4A400E41260000EAA
++:10079000AF86003C24E20004A382001C94A200E483
++:1007A00024C30004AF83003C34422000A4A200E430
++:1007B0000A001906000020218F820040AF80003C13
++:1007C00000471021AF820040000020212414FFFFC9
++:1007D000109402112403FFFF3C0808008D0877A83D
++:1007E0003C0208008C4231B03C03080090637798CB
++:1007F00031043FFF0082102B1040001B3067003F88
++:100800003C0208008C4231A88F83004800042180FC
++:1008100000621821006418213062007F0342282101
++:100820003C02000C00A228213C020080344200015E
++:100830003066007800C230252402FF800062102458
++:10084000AF42002830640007AF4208048F820014D2
++:100850000344202124840940AF460814AF850024B6
++:10086000AF840030AC4301189383003824020003A6
++:10087000146201CF240200012402002610E201D1FB
++:1008800028E2002710400013240200322402002234
++:1008900010E201CC28E200231040000824020024CA
++:1008A0002402002010E201B82402002110E20147D6
++:1008B000024020210A001AFB2402000B10E201C1B1
++:1008C0002402002510E20010024020210A001AFB39
++:1008D0002402000B10E201AE28E2003310400006B3
++:1008E0002402003F2402003110E2009A024020213D
++:1008F0000A001AFB2402000B10E201A5024020218D
++:100900000A001AFB2402000B8F90002C3C03080005
++:100910008C6331B08F8500308E0400100000A82158
++:100920008CB3001430823FFF0043102B8CB10020A9
++:100930005040018F0240202190A3000D2402FF802F
++:1009400000431024304200FF504001890240202122
++:10095000000413823042000314400185024020212C
++:1009600094A3001C8F8200148E040028A443011459
++:100970008CA20010026218231064000302402021A0
++:100980000A00197C2402001F8F82003400621021AB
++:100990000262102B104000088F83002402402021A7
++:1009A000240200180E0016C5A382004410540174DE
++:1009B0002404FFFF8F8300248F8400348C62001096
++:1009C0000224882100441023AC6200108F8200149E
++:1009D000AC7100208C4200680051102B10400009BF
++:1009E0008F830030024020212402001D0E0016C516
++:1009F000A38200442403FFFF104301612404FFFF8E
++:100A00008F8300308E0200248C6300241043000783
++:100A1000024020212402001C0E0016C5A3820044BF
++:100A20002403FFFF104301562404FFFF8F8400249A
++:100A30008C82002424420001AC8200241233000482
++:100A40008F8200148C4200685622000E8E02000035
++:100A50008E0200003C030080004310241440000D6F
++:100A60002402001A024020210E0016C5A382004471
++:100A70002403FFFF104301422404FFFF0A0019BAB8
++:100A80008E0200143C0300800043102450400003F9
++:100A90008E020014AC8000208E0200142411FFFF8F
++:100AA0001051000E3C0308003C0208008C423190BB
++:100AB000004310242403001B14400007A3830044B8
++:100AC0000E0016C5024020211051012D2404FFFF05
++:100AD0000A0019CB8E030000A38000448E0300009F
++:100AE0003C02000100621024104000123C02008011
++:100AF0000062102414400008024020212402001A41
++:100B00000E0016C5A38200442403FFFF1043011CFE
++:100B10002404FFFF02402021020028210E0016E5D8
++:100B2000240600012403FFFF104301152404FFFFE6
++:100B3000241500018F83002402A0302102402021CF
++:100B40009462003624050001244200010A001ADFE5
++:100B5000A46200368F90002C3C0308008C6331B0F7
++:100B60008E13001032623FFF0043102B10400089AB
++:100B70008F8400302402FF809083000D00431024F6
++:100B8000304200FF104000842402000D0013138245
++:100B900030420003240300011443007F2402000DAF
++:100BA0009082000D30420008544000048F820034CF
++:100BB000024020210A001A102402002450400004A0
++:100BC0008E03000C024020210A001A102402002784
++:100BD0008C82002054620006024020218E0300080F
++:100BE0008C820024506200098E02001402402021F1
++:100BF000240200200E0016C5A38200441054007188
++:100C00002403FFFF0A001A458F8400242411FFFFEC
++:100C1000145100048F860014024020210A001A405B
++:100C2000240200258E0300188CC2007C1062000391
++:100C30002402000E0A001A40024020218E030024E4
++:100C40008C82002810620003240200210A001A404E
++:100C5000024020218E0500288C82002C10A2000367
++:100C60002402001F0A001A40024020218E03002C9B
++:100C700014600003240200230A001A4002402021CD
++:100C80008CC200680043102B104000032402002691
++:100C90000A001A40024020218C82001400651821AD
++:100CA0000043102B104000088F84002402402021B4
++:100CB000240200220E0016C5A382004410510041F8
++:100CC0002403FFFF8F8400242403FFF79082000D8C
++:100CD00000431024A082000D8F8600143C030800FE
++:100CE0008C6331AC8F82004894C400E08F8500246F
++:100CF0000043102130847FFF000420400044102175
++:100D00003043007F034320213C03000E0083202159
++:100D10002403FF8000431024AF42002CA493000062
++:100D20008CA2002824420001ACA200288CA2002C36
++:100D30008E03002C00431021ACA2002C8E02002C4C
++:100D4000ACA200308E020014ACA2003494A2003A8F
++:100D500024420001A4A2003A94C600E03C0208002C
++:100D60008C4231B024C4000130837FFF1462000F35
++:100D700000803021240280000082302430C2FFFF36
++:100D8000000213C2304200FF000210270A001A7E40
++:100D9000000233C02402000D024020210E0016C5BF
++:100DA000A38200440A001A84004018218F82001494
++:100DB00002402021240500010E0015F9A44600E0A0
++:100DC000000018210A001B16006088218F90002C5B
++:100DD0003C0308008C6331B08E05001030A23FFF49
++:100DE0000043102B104000612402FF808F840030EC
++:100DF0009083000D00431024304200FF5040005CFF
++:100E0000024020218F8200341040000B0005138225
++:100E10008F8200149763000A944200603042FFFF03
++:100E200014430005000513828F8200202404FFFD77
++:100E30000A001AF3AF82003C304200031440000E57
++:100E40000000000092020002104000058E03002402
++:100E500050600015920300030A001AAF02402021DF
++:100E60008C82002450620010920300030240202173
++:100E70000A001AB72402000F9082000D30420008C9
++:100E80005440000992030003024020212402001074
++:100E90000E0016C5A38200442403FFFF1043003850
++:100EA0002404FFFF92030003240200025462000C9A
++:100EB000920200038F820034544000099202000322
++:100EC000024020212402002C0E0016C5A3820044FB
++:100ED0002403FFFF1043002A2404FFFF92020003B3
++:100EE0000200282102402021384600102CC60001B3
++:100EF0002C4200010E0016E5004630252410FFFFAD
++:100F00001050001F2404FFFF8F8300341060001373
++:100F1000024020213C0208008C42318C0043102BFF
++:100F200014400007000000000000282124060001F2
++:100F30000E001646000000000A001AF300002021EF
++:100F40002402002D0E0016C5A38200441050000C90
++:100F50002404FFFF0A001AF3000020210E0015F9F7
++:100F6000240500010A001AF300002021024020217C
++:100F70002402000D0E0016C5A3820044004020216B
++:100F80000A001B16008088211514000E00000000C6
++:100F90000E00174C024020210A001B160040882139
++:100FA0000E0016C5A38200440A001B1600408821CB
++:100FB00014620017022018212402002314E2000505
++:100FC0002402000B0E0017C0024020210A001B164D
++:100FD0000040882102402021A38200440E0016C553
++:100FE0002411FFFF0A001B170220182130A500FF63
++:100FF0000E001529240600019783003A8F82003CD9
++:10100000A780003A00431023AF82003C0220182141
++:101010001220003E9782003A2402FFFD5462003EF7
++:101020008E4300208E4200048F830014005610234C
++:10103000AE420004906200633042007FA062006311
++:101040008E4200208F840014A780003A34420002B0
++:10105000AE420020A48000E4908200632403FFBF1E
++:1010600000431024A08200630A001B598E43002015
++:101070009082006300621024304200FF1040002381
++:101080009782003A90820088908300BD2485008872
++:101090003042003F2444FFE02C820020A383001C48
++:1010A00010400019AF85002C2402000100821804B2
++:1010B000306200191440000C3C02800034420002EF
++:1010C000006210241440000B306200201040000F1A
++:1010D0009782003A90A600010240202124050001D9
++:1010E0000A001B5330C60001024020210A001B5297
++:1010F00024050001024020210000282124060001CF
++:101100000E001646000000009782003A1440FD04CD
++:101110008F8400148E4300203062000410400012BF
++:101120008F84003C2402FFFB00621024AE420020AA
++:10113000274301808F4201B80440FFFE8F820048A0
++:10114000AC6200008F420124AC6200042402608380
++:10115000A462000824020002A062000B3C021000FE
++:10116000AF4201B88F84003C8F8300148FBF0034DE
++:101170008FB600308FB5002C8FB400288FB30024B9
++:101180008FB200208FB1001C8FB000182402000124
++:1011900027BD003803E00008AC6400C030A500FFA4
++:1011A0002403000124A900010069102B1040000C49
++:1011B00000004021240A000100A31023004A380443
++:1011C00024630001308200010069302B10400002CE
++:1011D000000420420107402554C0FFF800A310235B
++:1011E00003E00008010010213C020800244260A432
++:1011F0003C010800AC22738C3C02080024425308D6
++:101200003C010800AC2273902402000627BDFFE0D9
++:101210003C010800A02273943C021EDCAFB200180F
++:10122000AFB10014AFBF001CAFB0001034526F411B
++:1012300000008821240500080E001B7A02202021CE
++:10124000001180803C07080024E773980002160014
++:1012500002071821AC6200000000282124A200012E
++:101260003045FFFF8C6200002CA6000804410002FC
++:10127000000220400092202614C0FFF8AC64000059
++:10128000020780218E0400000E001B7A2405002036
++:10129000262300013071FFFF2E2301001460FFE5BB
++:1012A000AE0200008FBF001C8FB200188FB1001477
++:1012B0008FB0001003E0000827BD002027BDFFD835
++:1012C000AFB3001CAFB20018AFBF0020AFB1001425
++:1012D000AFB000108F5101408F48014800089402C0
++:1012E000324300FF311300FF8F4201B80440FFFE7C
++:1012F00027500180AE1100008F420144AE0200046D
++:1013000024020002A6120008A202000B240200140C
++:10131000AE1300241062002528620015104000085A
++:101320002402001524020010106200302402001272
++:10133000106200098FBF00200A001CB58FB3001C8B
++:101340001062007024020022106200378FBF00205C
++:101350000A001CB58FB3001C3C0208008C4231A06F
++:101360002403FF800222102100431024AF420024F6
++:101370003C0208008C4231A0022210213042007F42
++:10138000034218213C02000A00621821166000BCCA
++:10139000AF830014906200623042000F344200308C
++:1013A000A06200620A001CB48FBF00203C046000F1
++:1013B0008C832C083C02F0033442FFFF00621824A7
++:1013C000AC832C083C0208008C4231A08C832C0892
++:1013D000244200740002108200021480006218256A
++:1013E000AC832C080A001CB48FBF00203C0208000C
++:1013F0008C4231A02403FF800222102100431024DC
++:10140000AF4200243C0208008C4231A03C03000A99
++:10141000022210213042007F03421021004310219C
++:101420000A001CB3AF8200143C0208008C4231A0B9
++:101430002405FF800222102100451024AF42002421
++:101440003C0208008C4231A0022210213042007F71
++:10145000034218213C02000A0062182190620063D6
++:1014600000A21024304200FF10400085AF8300141A
++:1014700024620088944300123C0208008C4231A888
++:1014800030633FFF00031980022210210043102126
++:101490003043007F03432021004510243C03000C0F
++:1014A00000832021AF4200289082000D00A210246A
++:1014B000304200FF10400072AF8400249082000D83
++:1014C000304200101440006F8FBF00200E0015C87E
++:1014D000000000008F4201B80440FFFE0000000041
++:1014E000AE1100008F420144AE020004240200024B
++:1014F000A6120008A202000BAE1300240A001CB4BE
++:101500008FBF00202406FF8002261024AF42002057
++:101510003C0208008C4231A031043FFF00042180CE
++:101520000222102100461024AF4200243C03080090
++:101530008C6331A83C0208008C4231A03227007F26
++:101540000223182102221021006418213042007F5A
++:101550003064007F034228213C02000A0066182400
++:1015600000A22821034420213C02000C00822021FB
++:10157000AF4300283C020008034718210062902175
++:10158000AF850014AF8400240E0015C8010080212F
++:101590008F4201B80440FFFE8F8200248F84001424
++:1015A000274501809042000DACB10000A4B00006B8
++:1015B000000216000002160300021027000237C2C4
++:1015C00014C00016248200889442001232033FFFA8
++:1015D00030423FFF14430012240260829083006374
++:1015E0002402FF8000431024304200FF5040000CD2
++:1015F00024026082908200623042000F3442004038
++:10160000A082006224026084A4A200082402000DCB
++:10161000A0A200050A001C9E3C0227002402608252
++:10162000A4A20008A0A000053C02270000061C00A0
++:101630000062182524020002A0A2000BACA3001037
++:10164000ACA00014ACA00024ACA00028ACA0002CDE
++:101650008E42004C8F840024ACA200189083000DB1
++:101660002402FF8000431024304200FF1040000598
++:101670008FBF00209082000D3042007FA082000DBD
++:101680008FBF00208FB3001C8FB200188FB10014E1
++:101690008FB000103C02100027BD002803E00008B6
++:1016A000AF4201B80800343008003430080033A8D5
++:1016B000080033E0080034140800343808003438D7
++:1016C00008003438080033180A0001240000000024
++:1016D000000000000000000D747061362E322E33C1
++:1016E00000000000060203010000000000000000EE
++:1016F00000000000000000000000000000000000EA
++:1017000000000000000000000000000000000000D9
++:1017100000000000000000000000000000000000C9
++:1017200000000000000000000000000000000000B9
++:1017300000000000000000000000000000000000A9
++:101740000000000000000000000000000000000099
++:101750000000000000000000000000001000000376
++:10176000000000000000000D0000000D3C02080019
++:1017700024421C003C03080024632094AC40000079
++:101780000043202B1480FFFD244200043C1D080070
++:1017900037BD2FFC03A0F0213C1008002610049058
++:1017A0003C1C0800279C1C000E00015C000000008F
++:1017B0000000000D3084FFFF308200078F85001885
++:1017C00010400002248300073064FFF800853021B8
++:1017D00030C41FFF03441821247B4000AF85001C48
++:1017E000AF84001803E00008AF4400843084FFFF9A
++:1017F000308200078F8500208F860028104000026D
++:10180000248300073064FFF8008520210086182B10
++:1018100014600002AF8500240086202303442821A1
++:1018200034068000AF840020AF44008000A6202151
++:1018300003E00008AF84003827BDFFD8AFB3001C19
++:10184000AFB20018AFB00010AFBF0024AFB400209B
++:10185000AFB100143C0860088D1450002418FF7FBD
++:101860003C1A8000029898243672380CAD12500051
++:101870008F5100083C07601C3C08600036300001B6
++:10188000AF500008AF800018AF400080AF40008428
++:101890008CE600088D0F08083C0760168CEC0000F1
++:1018A00031EEFFF039CA00103C0DFFFF340B800011
++:1018B0003C030080034B48212D440001018D282466
++:1018C0003C0253533C010800AC230420AF8900388C
++:1018D000AF860028AF840010275B400014A20003ED
++:1018E00034E37C008CF90004032818218C7F007CF1
++:1018F0008C6500783C0280003C0B08008D6B048CEA
++:101900003C0A08008D4A048834520070AF85003CC0
++:10191000AF9F00403C13080026731C440240A021E6
++:101920008E4800008F46000038C30001306400017B
++:1019300010800017AF880034028048218D2F0000EE
++:101940003C0508008CA5045C3C1808008F1804585E
++:1019500001E8102300A280210000C8210202402BD0
++:1019600003198821022838213C010800AC30045CAE
++:101970003C010800AC2704588F4E000039CD00010F
++:1019800031AC00011580FFED01E04021AF8F003444
++:101990008E5100003C0708008CE7045C3C0D0800F9
++:1019A0008DAD04580228802300F0602100007021D2
++:1019B0000190302B01AE1821006620213C01080067
++:1019C000AC2C045C3C010800AC2404588F46010890
++:1019D0008F47010030C92000AF860000AF87000CA0
++:1019E0001120000A00C040213C1808008F18042C68
++:1019F000270800013C010800AC28042C3C184000DA
++:101A0000AF5801380A000196000000009749010410
++:101A100000002821014550213122FFFF0162582199
++:101A20000162F82B015F502130D902003C0108000F
++:101A3000AC2B048C3C010800AC2A0488172000154C
++:101A400024040F0010E400130000000024080D001F
++:101A500010E8023B30CD000611A0FFE93C18400021
++:101A6000936E00002409001031C400F01089027147
++:101A700024020070108202E58F880014250F0001F7
++:101A8000AF8F00143C184000AF5801380A0001968F
++:101A900000000000974C01041180FFD93C18400061
++:101AA00030C34000146000A1000000008F460178A0
++:101AB00004C0FFFE8F87003824100800240F0008A0
++:101AC0008CE30008AF500178A74F0140A7400142C6
++:101AD000974E01048F86000031C9FFFF30CD000111
++:101AE00011A002E1012040212531FFFE241800024F
++:101AF000A75801463228FFFFA75101483C190800AA
++:101B00008F39043C172002D08F8C000C30DF00206E
++:101B100017E00002240400092404000130C20C0074
++:101B2000240504005045000134840004A744014A00
++:101B30003C1108008E3104203C1800483C10000184
++:101B40000238182530CF00020070282511E000046B
++:101B5000000018213C19010000B9282524030001C8
++:101B600030DF000453E00005AF8300083C0600109E
++:101B700000A6282524030001AF830008AF4510000C
++:101B80000000000000000000000000000000000055
++:101B90008F83000810600023000000008F451000B4
++:101BA00004A1FFFE000000001060001E0000000005
++:101BB0008F4410003C0C0020008C102410400019B1
++:101BC0008F8E000031CD000211A000160000000031
++:101BD000974F101415E000130000000097591008EB
++:101BE0003338FFFF271100060011188200033080F0
++:101BF00000C7282132300001322300031200032CD9
++:101C00008CA200000000000D00C7F821AFE2000028
++:101C10003C0508008CA5043024A600013C01080006
++:101C2000AC2604308F6D00003402FFFFAF8D00043E
++:101C30008CEC0000118202A6000020218CED000037
++:101C400031AC01001180028A000000003C02080053
++:101C50008C4204743C0308008C63044C3C1F080055
++:101C60008FFF04703C1808008F1804480048382182
++:101C70000068802100E8282B03E430210208402B73
++:101C80000304882100C57021022878213C01080046
++:101C9000AC30044C3C010800AC2F04483C01080067
++:101CA000AC2704743C010800AC2E04708F8400182B
++:101CB0000120302131290007249F000833F91FFF3C
++:101CC00003594021AF84001CAF990018251B400028
++:101CD000AF590084112000038F83002024C2000725
++:101CE0003046FFF88F84002800C3282100A4302B41
++:101CF00014C00002AF83002400A428230345602100
++:101D0000340D8000018D10213C0F1000AF850020A4
++:101D1000AF820038AF450080AF4F01788F88001444
++:101D2000250F00010A0001EFAF8F00148F62000839
++:101D30008F670000240500300007760231C300F0F1
++:101D4000106500A7240F0040546FFF4C8F880014CB
++:101D50008F4B01780560FFFE0000000030CA0200D2
++:101D600015400003000612820000000D00061282DA
++:101D7000304D0003000D4900012D18210003808023
++:101D8000020D402100086080019380218E1F000019
++:101D900017E00002000000000000000D8F6E00043C
++:101DA00005C202BD92070006920E000592020004D1
++:101DB0003C090001000E18800070F8218FED00181A
++:101DC000277100082448000501A96021000830821D
++:101DD000AFEC0018022020210E00059E26050014FD
++:101DE000920A00068F7900043C0B7FFF000A2080D6
++:101DF000009178218DF800043566FFFF0326282422
++:101E000003053821ADE70004920E0005920D000491
++:101E1000960C0008000E10800051C8218F2300008E
++:101E2000974901043C07FFFF006758243128FFFF52
++:101E3000010DF82103EC50233144FFFF01643025EC
++:101E4000AF260000920300072418000110780275E5
++:101E5000240F0003106F0285000000008E050010A3
++:101E60002419000AA7590140A7450142921800040D
++:101E70008F860000240F0001A7580144A7400146A7
++:101E80009747010430D100023C050041A7470148B3
++:101E900000001821A74F014A1220000330CB000494
++:101EA0003C0501412403000151600005AF83000897
++:101EB0003C06001000A6282524030001AF8300087B
++:101EC000AF4510000000000000000000000000000E
++:101ED000000000008F8A000811400004000000008C
++:101EE0008F4410000481FFFE000000008F6B000093
++:101EF000920800043C1108008E310444AF8B0004AA
++:101F000097590104311800FF3C0E08008DCE0440A3
++:101F10003325FFFF0305382102276021000010212F
++:101F2000250F000A31E8FFFF0187482B01C2682115
++:101F300001A9F821311000073C010800AC2C044431
++:101F40003C010800AC3F0440120000038F8C0018D5
++:101F50002506000730C8FFF8010C682131BF1FFFBC
++:101F6000AF8C001CAF9F0018AF5F00849744010442
++:101F7000035F80213084FFFF308A00071140000397
++:101F8000261B4000248900073124FFF88F8200209F
++:101F90008F850028008220210085702B15C000024B
++:101FA000AF820024008520233C0B08008D6B048C3D
++:101FB0003C0A08008D4A04880344882134038000C9
++:101FC000022310213C0F1000AF840020AF820038A4
++:101FD000AF440080AF4F01780A0002968F8800144A
++:101FE0008F5001780600FFFE30D10200162000035A
++:101FF000000612820000000D00061282305F00030E
++:10200000001F1900007F302100062080009FC8219A
++:1020100000194880013380218E180000130000024F
++:10202000000000000000000D8F6C000C058001FB1B
++:102030008F870038240E0001AE0E00008CE30008EC
++:10204000A20000078F65000400055402314D00FF17
++:1020500025A80005000830822CCB00411560000245
++:10206000A20A00040000000D8F7800043C03FFFF6B
++:1020700000E02821330BFFFF256C000B000C1082C1
++:1020800000022080008748218D3F000026040014B4
++:10209000A618000803E3C8240E00059EAD39000011
++:1020A0008F4F01083C11100001F1382410E001AB02
++:1020B00000000000974D01049208000725AAFFECDC
++:1020C000350600023144FFFFA2060007960600080D
++:1020D0002CC7001354E0000592030007921100077B
++:1020E000362F0001A20F00079203000724180001F9
++:1020F000107801C224090003106901D58F880038C7
++:1021000030CBFFFF257100020011788331E400FF1E
++:1021100000042880A20F000500A848218D2D000092
++:10212000974A01043C0EFFFF01AEF8243143FFFF44
++:10213000006B1023244CFFFE03ECC825AD390000D2
++:10214000920600053C03FFF63462FFFF30D800FF23
++:102150000018388000F08821922F00143C04FF7F83
++:102160003487FFFF31EE000F01C65821316500FFB3
++:1021700000055080015068218DAC00200148F821F5
++:10218000A20B00060182C824AE0C000CAFF9000CB3
++:10219000920900068E11000C032778240009C080E4
++:1021A0000310702195C60026030828210227202449
++:1021B000AE04000CADCF0020ADC60024ACA60010CC
++:1021C0008F8800003C0B08008D6B048C3C0A0800D3
++:1021D0008D4A0488241F001024190002A75F0140C3
++:1021E000A7400142A7400144A7590146974901046D
++:1021F00024070001310600022538FFFEA7580148D8
++:102200003C050009A747014A10C00003000018213F
++:102210003C05010924030001310C00045180000534
++:10222000AF8300083C08001000A828252403000103
++:10223000AF830008AF451000000000000000000060
++:1022400000000000000000009205000424AE00021F
++:1022500031CD0007000D182330620007AE020010D8
++:102260008F90000812000004000000008F4F100043
++:1022700005E1FFFE000000008F7100008F8E001846
++:102280003C0308008C630444AF91000497450104AB
++:1022900025CF001031E61FFF30A2FFFFAF8E001CDC
++:1022A000AF860018AF4600842449FFFE3C0C0800AE
++:1022B0008D8C0440974D010401208021000947C303
++:1022C0000070C02131A9FFFF0310F82B0188C8213D
++:1022D000033F202103463821313100073C0108002B
++:1022E000AC3804443C010800AC2404401220000334
++:1022F00024FB40002527000730E9FFF88F860020E7
++:102300008F8400280126382100E4C02B170000022A
++:10231000AF86002400E438230347202134198000CD
++:10232000009910213C0F1000AF870020AF820038C9
++:10233000AF470080AF4F01780A0002968F880014E3
++:102340009747010410E0FDAE3C1840008F5801781B
++:102350000700FFFE30C5400010A000033C1F00082E
++:102360000000000D3C1F0008AF5F01402410080072
++:102370008F860000AF5001789744010430D90001E6
++:10238000132000ED3086FFFF24CCFFFE240D000259
++:10239000A74D0146A74C01488F9100182408000D55
++:1023A000A748014A8F630000262F000831E21FFF73
++:1023B0000342702130C90007AF830004AF91001CB5
++:1023C000AF82001800C03821AF4200841120000302
++:1023D00025DB400024D800073307FFF88F85002055
++:1023E0008F84002800E5302100C4382B14E000025F
++:1023F000AF85002400C430238F8400140346F821E5
++:10240000340C8000AF86002003EC8021AF460080B2
++:10241000249900013C0610003C184000AF460178AA
++:10242000AF900038AF990014AF5801380A000196F8
++:10243000000000008F630000975101043067FFFF28
++:102440003228FFFF8F4F017805E0FFFE30EC0007D8
++:10245000000CF82333F0000724F9FFFE2404000ADF
++:10246000A7440140A7500142A7590144A740014693
++:10247000A74801488F45010830B800201700000226
++:10248000240300092403000130CD0002A743014AC0
++:102490003C04004111A00003000018213C0401414C
++:1024A0002403000130C9000451200005AF83000857
++:1024B0003C0600100086202524030001AF8300089D
++:1024C000AF44100000000000000000000000000009
++:1024D000000000008F8E000811C000040000000002
++:1024E0008F4210000441FFFE000000008F7F0000BB
++:1024F000276400088F91003CAF9F0004948500087A
++:102500009490000A9499000C30AFFFFF0010C400B3
++:102510003323FFFF11F100A6030320253C0E080022
++:102520008DCE04443C0C08008D8C044000E88821CA
++:102530002626FFFE01C628210000682100A6F82BF0
++:10254000018D2021009F80213C010800AC2504441E
++:102550003C010800AC30044024E200083042FFFF98
++:102560003047000710E000038F830018244F000756
++:1025700031E2FFF83106FFFF30C800070043802139
++:1025800032191FFF0359C021AF83001CAF990018F7
++:10259000271B4000AF590084110000038F8C0020DE
++:1025A00024C5000730A6FFF88F84002800CC28211E
++:1025B00000A4F82B17E00002AF8C002400A428230D
++:1025C000AF850020AF4500803C0408008C840434B3
++:1025D00003454821340E8000012E6821108000053B
++:1025E000AF8D0038939100172406000E12260011BB
++:1025F0002407043F3C021000AF4201788F8800148A
++:10260000250F00010A0001EFAF8F00140E0005C472
++:1026100000E020218F8800143C0B08008D6B048C97
++:102620003C0A08008D4A0488250F00010A0001EFCA
++:10263000AF8F00143C021000A7470148AF42017859
++:102640000A0004CE8F88001424040F001184003D7A
++:1026500030CE002015C0000224030009240300012D
++:102660000A00021AA743014A0A00020DA7400146C8
++:1026700094EF000894F1000A94F0000C8F8C003C59
++:10268000001174003207FFFF31EDFFFF11AC00377E
++:1026900001C720253C1808008F1804443C0F08008F
++:1026A0008DEF0440000080210308682101A8382B29
++:1026B00001F0702101C760213C010800AC2D0444E9
++:1026C0003C010800AC2C04400A00027A8F840018F8
++:1026D0003C0208008C42047C3C0308008C630454D8
++:1026E0003C1F08008FFF04783C1808008F18045026
++:1026F000004838210068802100E8282B03E43021BD
++:102700000208402B0304882100C57021022878218B
++:102710003C010800AC3004543C010800AC2F0450CC
++:102720003C010800AC27047C3C010800AC2E047876
++:102730000A00027A8F840018A74001460A00043577
++:102740008F91001830CD002015A0FFC52403000D87
++:10275000240300050A00021AA743014A974E010408
++:1027600025C5FFF00A00038130A4FFFF8F980040C9
++:102770001498FFC8000010213C0508008CA5046CCB
++:102780003C1F08008FFF046800A8C8210328302BD5
++:1027900003E22021008640213C010800AC39046C92
++:1027A0003C010800AC2804680A00027A8F840018F3
++:1027B0008F8C0040148CFF5900E8C8213C18080099
++:1027C0008F18046C3C1108008E3104682723FFFE2B
++:1027D00003034821000010210123302B0222702125
++:1027E00001C668213C010800AC29046C3C010800CA
++:1027F000AC2D04680A0004A524E200088F88003884
++:102800003C03FFFF8D02000C0043F82403E4C825BD
++:10281000AD19000C0A00038F30CBFFFF0A0003C381
++:10282000AE000000974A0104920400048E26000CBA
++:10283000014458212579FFF200C7C0243325FFFF4A
++:1028400003053825AE27000C0A0002E68E050010AD
++:102850003C0DFFFF8D0A0010014D582401646025D6
++:10286000AD0C00100A00038F30CBFFFF974301042B
++:10287000920E00048E290010006E1021244DFFEEF0
++:102880000127602431A8FFFF0188F825AE3F001022
++:102890000A0002E68E0500108E0F000CAE0000004C
++:1028A00000078880023028210A0002B8ACAF00205F
++:1028B0001460000D3058FFFF3C04FFFF0044682403
++:1028C00001A47026000E602B000D102B004CF82484
++:1028D00013E00002000000000000000D8CAF0000BB
++:1028E0000A00025001E410253B03FFFF0003882B80
++:1028F0000018802B0211202410800002000000002C
++:102900000000000D8CB900000A0002503722FFFFC2
++:102910003084FFFF30A5FFFF108000070000182162
++:10292000308200011040000200042042006518219E
++:102930001480FFFB0005284003E000080060102120
++:1029400010C00007000000008CA2000024C6FFFF9A
++:1029500024A50004AC82000014C0FFFB2484000402
++:1029600003E000080000000010A0000824A3FFFFFF
++:10297000AC86000000000000000000002402FFFF01
++:102980002463FFFF1462FFFA2484000403E00008BC
++:1029900000000000308EFFFF30D8FFFF00057C00F4
++:1029A00001F8602539CDFFFF01AC5021014C582BB7
++:1029B000014B4821000944023127FFFF00E8302184
++:1029C0000006240230C5FFFF00A418213862FFFF73
++:1029D00003E000083042FFFF3C0C08008D8C0484AB
++:1029E000240BFF8027BDFFD001845021014B4824D8
++:1029F000AF4900203C0808008D080484AFB20020D5
++:102A0000AFB00018AFBF0028AFB30024AFB1001CB7
++:102A1000936600040104382130E4007F009A1021FD
++:102A20003C0300080043902130C500200360802152
++:102A30003C080111277B000814A000022646007004
++:102A40002646006C9213000497510104920F000473
++:102A50003267000F322EFFFF31ED004001C72823FF
++:102A600011A0000500004821925900BC3338000431
++:102A70001700009000000000924300BC307F00046B
++:102A800013E0000F0000000010A0000D0000000087
++:102A9000960E0002240AFF8000A7602125CDFFFECC
++:102AA000A74D1016920B0004014B2024308200FF2A
++:102AB00010400085010C40253C0F0400010F40250B
++:102AC0008F5301780660FFFE2404000AA7440140EA
++:102AD000960D00022404000931AC0007000C5823B5
++:102AE000316A0007A74A0142960200022443FFFE12
++:102AF000A7430144A7400146975F0104A75F01482F
++:102B00008F590108333800205300000124040001CC
++:102B1000920F000431EE001015C000023483001043
++:102B200000801821A743014A0000000000000000B7
++:102B30000000000000000000AF481000000000008E
++:102B40000000000000000000000000008F51100095
++:102B50000621FFFE3113FFFF12600003000000009A
++:102B60008F481018ACC8000096030006307FFFFFA6
++:102B700027F900020019988200138880023B302157
++:102B80008CD800001520005700183402920300046E
++:102B90002405FF8000A3F82433F100FF1220002C4D
++:102BA00000000000924700BC30F2000212400028F2
++:102BB00000000000974B100C2562FFFEA742101684
++:102BC000000000003C0A040035490030AF49100005
++:102BD00000000000000000000000000000000000F5
++:102BE0008F4C10000581FFFE000000009749100C7B
++:102BF0008F51101C00C020213127FFFF24F200302C
++:102C0000001218820003288000BBF8213226FFFF43
++:102C1000AFF100000E0005B300112C020013C880B4
++:102C2000033B98218E78000000027400AFB80010BA
++:102C30008FA80010310FFFFFAFAF00108FA400105E
++:102C400001C46825AFAD00108FA60010AE6600006D
++:102C500097730008976D000A9766000C8F8A003CF6
++:102C6000000D5C0030CCFFFF3262FFFF104A0036DF
++:102C7000016C2025960600023C10100024D30008A9
++:102C80000E00013B3264FFFF974C01040E00014926
++:102C90003184FFFFAF5001788FBF00288FB300242D
++:102CA0008FB200208FB1001C8FB0001803E0000825
++:102CB00027BD003010A0FF700000000024A5FFFC1D
++:102CC0000A0005EC240900048CD10000AF51101853
++:102CD0008F5301780660FF7A2404000A0A00060177
++:102CE0000000000000A7C8218F8800388F4E101CFC
++:102CF0000019C0820018788001E82021AC8E000005
++:102D0000000E2C0200C020210E0005B331C6FFFFCB
++:102D1000023B28218CAD000000025400004030210D
++:102D2000AFAD00108FAC0010318BFFFFAFAB0010C8
++:102D30008FA2001001424825AFA900108FA70010F4
++:102D40000A000631ACA700008F8F0040148FFFC926
++:102D50000000000097420104960B00023C050800A9
++:102D60008CA5046C3049FFFF316AFFFF3C1108005D
++:102D70008E310468012A382124F2FFFE00B240217E
++:102D80000012FFC30112C82B023FC02103192021EA
++:102D90003C010800AC28046C3C010800AC24046829
++:102DA0000A00066B0000000000A4102B1040000970
++:102DB000240300010005284000A4102B04A00003F8
++:102DC000000318405440FFFC000528401060000735
++:102DD000000000000085302B14C0000200031842E0
++:102DE000008520231460FFFB0005284203E0000853
++:102DF000008010218F85002C27BDFFE800053027BB
++:102E00002CC300012CA400020083102510400003F5
++:102E1000AFBF00102405007FAF85002C00052827D8
++:102E200030A5FFFF0E000592240426F58F830030A5
++:102E3000240402BD004030210083382B10E000093B
++:102E400024050001000420400083102B04800003AF
++:102E5000000528405440FFFC0004204010A000085A
++:102E600000C350210064402B1500000200052842D9
++:102E70000064182314A0FFFB0004204200C350216B
++:102E80008FBF0010000A4C02312200FF27BD00183E
++:102E9000AF8A002C03E00008AF8900300A00002A46
++:102EA00000000000000000000000000D7478703683
++:102EB0002E322E3300000000060203000000000046
++:102EC000000001360000EA60000000000000000081
++:102ED00000000000000000000000000000000000F2
++:102EE00000000000000000000000000000000000E2
++:102EF00000000000000000000000000000000016BC
++:102F000000000000000000000000000000000000C1
++:102F100000000000000000000000000000000000B1
++:102F200000000000000000000000000000000000A1
++:102F3000000000000000138800000000000005DC15
++:102F4000000000000000000010000003000000006E
++:102F50000000000D0000000D3C02080024423C204F
++:102F60003C03080024633DD4AC4000000043202B08
++:102F70001480FFFD244200043C1D080037BD7FFC87
++:102F800003A0F0213C100800261000A83C1C0800FB
++:102F9000279C3C200E0002BA000000000000000D3B
++:102FA0008F8300383C088000350700708CE50000F6
++:102FB000008330253C02900000C22025AF85003000
++:102FC000AF4400208F4900200520FFFE3C03800015
++:102FD000346200708C4500008F8600303C19080078
++:102FE0008F39007C3C0E08008DCE007800A620238F
++:102FF00003245821000078210164682B01CF60214F
++:10300000018D50213C010800AC2B007C3C010800E4
++:10301000AC2A007803E00008000000000A0000412C
++:10302000240400018F8400383C05800034A2000194
++:103030000082182503E00008AF43002003E00008E9
++:10304000000010213084FFFF30A5FFFF1080000733
++:1030500000001821308200011040000200042042CC
++:10306000006518211480FFFB0005284003E00008DC
++:103070000060102110C00007000000008CA20000BA
++:1030800024C6FFFF24A50004AC82000014C0FFFB8F
++:103090002484000403E000080000000010A00008E1
++:1030A00024A3FFFFAC860000000000000000000029
++:1030B0002402FFFF2463FFFF1462FFFA248400044C
++:1030C00003E0000800000000308AFFFF93A800130F
++:1030D000A74A014497490E1630C600FF3C02100073
++:1030E000A7490146AF450148A3460152A748015AE6
++:1030F000AF4701608FA400188FA30014A7440158A4
++:10310000AF43015403E00008AF42017803E0000838
++:10311000000000003C038000346200708C49000015
++:103120008F8800002484000727BDFFF83084FFF853
++:10313000AF890030974D008A31ACFFFFAFAC000083
++:103140008FAB0000016850232547FFFF30E61FFFCB
++:1031500000C4282B14A0FFF73C0C8000358B0070B6
++:103160008D6A00003C0708008CE700843C060800DC
++:103170008CC6008000081082014918230002788064
++:1031800000E370210000202101C3C82B00C4C0212E
++:1031900001FA4021031948212502400027BD0008FB
++:1031A0003C010800AC2E00843C010800AC290080E2
++:1031B00003E00008000000008F8200002486000762
++:1031C00030C5FFF800A2182130641FFF03E000089B
++:1031D000AF8400008F8700388F8A004027BDFFB87A
++:1031E0008F860044AFB60040AFBF0044AFB5003C8F
++:1031F000AFB40038AFB30034AFB20030AFB1002C81
++:10320000AFB000288F4501048D4900ACAF47008066
++:103210008CC8002000A938230000B021AF480E1050
++:103220008F440E1000004821AF440E148CC20024BD
++:10323000AF420E188F430E18AF430E1C10E001254D
++:103240002D230001936B0008116000D400000000E2
++:10325000976E001031CDFFFF00ED602B158000CF81
++:103260000000000097700010320FFFFFAF4F0E00FC
++:103270008F520000325100081220FFFD00000000B4
++:1032800097540E088F460E043285FFFF30B30001BD
++:1032900012600132000000000000000D30B8A040B4
++:1032A00024150040131500C030A9A0001120012DE5
++:1032B00000000000937F000813E0000800000000F9
++:1032C00097630010306BFFFF00CB402B1100000311
++:1032D00030AC00401180012300000000A785003CB5
++:1032E000AF8600349366000800E02821AFA70020D5
++:1032F00014C0012427B30020AF60000C9782003C6B
++:103300003047400014E00002240300162403000E9E
++:1033100024194007A363000AAF790014938A003E82
++:103320008F740014315800070018AA4002959025A8
++:10333000AF7200149784003C8F700014309100101D
++:1033400002117825AF6F0014978E003C31CD000834
++:1033500011A00147000028218F6700143C021000D3
++:103360003C0C810000E22825AF65001497460E0A48
++:103370002408000E3405FFFC30C3FFFF006C582505
++:10338000AF6B0004A3680002937F000A27E90004E2
++:10339000A369000A9786003C9363000A30CC1F00A3
++:1033A000000C598301634021251F0028A37F0009D9
++:1033B00097490E0CA769001093790009272A00028B
++:1033C000315800070018A82332B10007A371000B81
++:1033D00093740009976400108F910034978F003C1C
++:1033E000329200FF024480210205702131ED00403D
++:1033F00011A0000531C4FFFF0091282B3C12800072
++:1034000010A000140000A0210224382B14E0011B9E
++:103410008FA500208F4D0E14AF4D0E108F420E1C45
++:10342000AF420E18AF440E008F4F000031EE00087F
++:1034300011C0FFFD0000000097540E080080882195
++:1034400000009021A794003C8F500E04241400012A
++:10345000AF900034976400103095FFFF8E68000035
++:103460000111F82317E00009AE7F00008F650014FA
++:103470008F8B004434A60040AF6600148F4C0E10B2
++:10348000AD6C00208F430E18AD63002493670008D5
++:1034900014E000D2000000000E00009E2404001082
++:1034A0008F8900483C08320000402821312600FF67
++:1034B0000006FC0003E8502525390001AF990048BB
++:1034C000AC4A0000937800099370000A330400FFAF
++:1034D00000047400320F00FF01CF6825AC4D0004DA
++:1034E0008F820048064000EAACA20008ACA0000CA5
++:1034F0009783003C306B0008156000022628000608
++:1035000026280002974E0E148F450E1C8F6700046C
++:10351000936D000231C4FFFF31A200FFAFA2001083
++:103520008F6C0014AFA800180E00008BAFAC001415
++:10353000240400100E0000C7000000008E7200007E
++:1035400016400005000000008F6400142405FFBF32
++:1035500000859824AF7300148F79000C033538214F
++:10356000AF67000C9375000816A00008000000006B
++:1035700012800006000000008F7F00143C0BEFFF5C
++:103580003568FFFE03E84824AF690014A3740008FF
++:103590008FA500200A00024602202021AF470E001E
++:1035A0000A0000F5000000008F5901780720FFFE97
++:1035B000241F08008F840000AF5F0178974B008ABA
++:1035C000316AFFFF014448232528FFFF31021FFF16
++:1035D0002C4300081460FFF9000000008F8E0048A3
++:1035E0008F8D003800C048210344202125C60001EA
++:1035F000240C0F00AF86004800E9382324864000E1
++:1036000031CA00FF11AC0005240800019391003E6F
++:103610003230000700107A4035E80001000AAC00A3
++:103620003C18010002B8A025AC9440008F930048DC
++:1036300030B2003630A40008ACD3000410800097EC
++:1036400001123025974E0E0A8F8D00003C0281003A
++:1036500031CCFFFF25AB0008018240253C03100060
++:1036600031651FFF25390006241F000EAF48016099
++:1036700000C33025A75F015AAF850000A759015844
++:1036800014E0000A8F93003824120F0052720002D7
++:103690002416000134C600408F580E108F94004449
++:1036A000AE9800208F550E18AE9500248F450E144D
++:1036B000AF4501448F590E1CAF590148A34A01522E
++:1036C0003C0A1000AF460154AF4A017814E0FEDD19
++:1036D0002D2300010076A025128000178FBF004423
++:1036E0008F84003824160F0010960084000000001C
++:1036F0008F45017804A0FFFE24150F001095006E81
++:10370000000000008F470E14240202403C1F1000EE
++:10371000AF4701448F440E1CAF440148A3400152FF
++:10372000A740015AAF400160A7400158AF42015481
++:10373000AF5F01788FBF00448FB600408FB5003C6B
++:103740008FB400388FB300348FB200308FB1002CAB
++:103750008FB0002803E0000827BD004814C0FED049
++:1037600030B8A0408F420E148F84004400004821DE
++:10377000AC8200208F510E1CAC9100240A00020E76
++:103780002D2300018F910034978A003C3C12800069
++:103790000220A821315800401700FF300000A0216E
++:1037A000976900108F9200343139FFFF13320035D2
++:1037B00000002021008048211480FEA000A03821B4
++:1037C0008F420E148F840044AC8200208F510E1C57
++:1037D000AC9100240A00020E2D230001936A000917
++:1037E0009378000B315000FF330F00FF020F702160
++:1037F00025C2000A3050FFFF0E00009E020020216B
++:103800008F8600483C1F410024CD0001AF8D004849
++:10381000936C000930C600FF00064400318300FFAE
++:10382000246B0002010B4825013FC825AC5900005C
++:103830008F67000C97440E1400F22825AC45000455
++:103840008F450E1C8F670004936A00023084FFFFCF
++:10385000315800FFAFB800108F6F0014AFB10018DF
++:103860000E00008BAFAF00140A0001A60200202159
++:10387000AF6000040A00013EA36000020A00024695
++:1038800000002021000090210A0001702414000192
++:103890003C1280000A000195ACB2000C8F91000030
++:1038A00025240002A744015826300008320F1FFFCC
++:1038B0000A0001F9AF8F0000AF40014C1120002C2D
++:1038C000000000008F590E10AF5901448F430E18AD
++:1038D000240200403C1F1000AF430148A3400152A6
++:1038E000A740015AAF400160A7400158AF420154C0
++:1038F000AF5F01780A0002278FBF00441120000645
++:103900000000000097460E0830CC004015800002F1
++:10391000000000000000000D8F4D017805A0FFFEA3
++:103920000000000097530E103C120500240E2000EA
++:10393000326AFFFF0152C025AF58014C8F4F0E1461
++:103940003C021000AF4F01448F500E1CAF50014895
++:10395000A34001528F840038A740015AAF40016054
++:10396000A7400158AF4E01540A000215AF4201783A
++:103970008F490E14AF4901448F430E1C0A00028E7A
++:10398000240200403C0E20FF27BDFFE03C1A8000CF
++:103990003C0F800835CDFFFDAFBF001CAFB2001853
++:1039A000AFB10014AFB00010AF8F0040AF4D0E00AC
++:1039B0000000000000000000000000000000000007
++:1039C000000000003C0C00FF358BFFFDAF4B0E00EC
++:1039D0003C0660048CC95000240AFF7F3C11600043
++:1039E000012A40243507380CACC750008E24043817
++:1039F00024050009AF4500083083FFFF38622F71AE
++:103A00002450C0B3AF8000480E000068AF800000B3
++:103A100052000001AE20442C0E0004353C11800001
++:103A20000E000ED9363000708F8A00403C1208001C
++:103A300026523C88020088218E0800008F5F00001B
++:103A40003BF900013338000113000017AF88003044
++:103A5000022048218D2700003C0F08008DEF006CEC
++:103A60003C0C08008D8C006800E8C02301F8282178
++:103A70000000682100B8302B018D582101664021DB
++:103A80003C010800AC25006C3C010800AC28006833
++:103A90008F44000038830001306200011440FFEDC4
++:103AA00000E04021AF8700308E0C00003C0508008C
++:103AB0008CA5006C3C0408008C84006801883023CD
++:103AC00000A638210000102100E6402B00821821BA
++:103AD0000068F8213C010800AC27006C3C0108009C
++:103AE000AC3F00688F49010025590088AF99004418
++:103AF000AF890038AF4900208E070000AF87003043
++:103B00008F4D017805A0FFFE000000008E0600002A
++:103B10003C0B08008D6B00743C0408008C84007022
++:103B200000C728230165F8210000102103E5402B80
++:103B30000082382100E8C821240908003C0108005F
++:103B4000AC3F00743C010800AC390070AF4901780B
++:103B500093580108A398003E938F003E31EE000178
++:103B600015C000158F830038240E0D00106E00194B
++:103B7000240F0F00106F001D00000000915900007D
++:103B800024180050332900FF113800043C1F400066
++:103B9000AF5F01380A0002E7000000000E00090EC6
++:103BA000000000008F8A00403C1F4000AF5F0138DA
++:103BB0000A0002E700000000938D003E31AC0006D1
++:103BC000000C51000E0000CE0152D8210A00034320
++:103BD0008F8A00403C1B0800277B3D080E0000CE6A
++:103BE000000000000A0003438F8A00403C1B0800CD
++:103BF000277B3D280E0000CE000000000A00034392
++:103C00008F8A004090AA00018FAB00108CAC00108E
++:103C10003C0300FF8D680004AD6C00208CAD0014E7
++:103C200000E060213462FFFFAD6D00248CA7001816
++:103C30003C09FF000109C024AD6700288CAE001CC0
++:103C40000182C82403197825AD6F0004AD6E002CE5
++:103C50008CAD0008314A00FFAD6D001C94A9000234
++:103C60003128FFFFAD68001090A70000A56000029A
++:103C7000A1600004A167000090A30002306200FF71
++:103C80000002198210600005240500011065000E75
++:103C90000000000003E00008A16A00018CD80028A1
++:103CA000354A0080AD7800188CCF0014AD6F001439
++:103CB0008CCE0030AD6E00088CC4002CA16A0001CF
++:103CC00003E00008AD64000C8CCD001CAD6D001845
++:103CD0008CC90014AD6900148CC80024AD680008BC
++:103CE0008CC70020AD67000C8CC200148C8300646C
++:103CF0000043C82B13200007000000008CC20014F2
++:103D0000144CFFE400000000354A008003E0000886
++:103D1000A16A00018C8200640A000399000000007F
++:103D200090AA000027BDFFF88FA9001CA3AA0000DD
++:103D30008FAE00003C0FFF808FA8001835E2FFFF18
++:103D40008CCD002C01C26024AFAC0000A120000487
++:103D500000E06021A7A000028FB800008D270004BA
++:103D60000188182100A0582100C05021006D28268C
++:103D70003C06FF7F3C0F00FF2CAD000135EEFFFF3E
++:103D800034D9FFFF3C02FF0003193024000D1DC091
++:103D9000010EC82400E2C02400C370250319782551
++:103DA000AD2E0000AD2F00048D450024AFAE000005
++:103DB000AD2500088D4D00202405FFFFAD2D000C22
++:103DC000956800023107FFFFAD27001091660018CB
++:103DD00030C200FF000219C2506000018D4500345E
++:103DE000AD2500148D67000827BD0008AD27001C15
++:103DF0008C8B00CCAD2C0028AD20002CAD2B0024EA
++:103E0000AD20001803E00008AD20002027BDFFE032
++:103E1000AFB20018AFB10014AFB00010AFBF001CBC
++:103E20009098000000C088213C0D00FF330F007FF8
++:103E3000A0CF0000908E000135ACFFFF3C0AFF00D0
++:103E4000A0CE000194A6001EA22000048CAB00149A
++:103E50008E29000400A08021016C2824012A40241E
++:103E60000080902101052025A6260002AE24000432
++:103E700026050020262400080E00007624060002F5
++:103E800092470000260500282624001400071E0083
++:103E90000003160324060004044000032403FFFF6C
++:103EA000965900023323FFFF0E000076AE23001068
++:103EB000262400248FBF001C8FB200188FB100147D
++:103EC0008FB0001024050003000030210A0000809C
++:103ED00027BD002027BDFFD8AFB1001CAFB0001830
++:103EE000AFBF002090A80000240200018FB0003C6A
++:103EF0003103003F00808821106200148FAA00382F
++:103F0000240B0005506B0016AFAA001000A0202162
++:103F100000C028210E0003DC02003021922400BCE6
++:103F2000308300021060000326060030ACC00000A1
++:103F300024C600048FBF00208FB1001C8FB0001872
++:103F400000C0102103E0000827BD002801403821EF
++:103F50000E00035AAFB000100A0004200000000059
++:103F60000E0003A1AFB000140A00042000000000FE
++:103F70003C02000A034218213C04080024843D6CE2
++:103F80002405001A000030210A000080AF8300548D
++:103F90003C038000346200708C48000000A058216F
++:103FA00000C04821308A00FFAF8800308F4401787C
++:103FB0000480FFFE3C0C8000358600708CC500003C
++:103FC0003C0308008C6300743C1808008F180070D4
++:103FD00000A82023006468210000C82101A4782BD8
++:103FE0000319702101CF60213C010800AC2D007441
++:103FF0003C010800AC2C00708F480E14AF480144FF
++:10400000AF47014CA34A0152A74B01589346010800
++:1040100030C5000854A0000135291000934B090059
++:1040200024070050316A00FF11470007000000001C
++:104030008F450E1CAF450148AF4901543C091000A3
++:1040400003E00008AF490178934D010831A800084A
++:104050001100001000000000934F010831EE001025
++:1040600051C00001352900083C04080090843DD06F
++:10407000A34401508F4309A4AF4301488F4209A0D4
++:10408000AF420144AF4901543C09100003E000086D
++:10409000AF4901783C1908008F393D8C333800084E
++:1040A0005700FFF1352900080A00047300000000E2
++:1040B00024070040AF470814AF4008108F4209445E
++:1040C0008F4309508F4409548F45095C8F46094C32
++:1040D000AF820064AF830050AF84004CAF85005CBA
++:1040E00003E00008AF8600609346010930C5007FF9
++:1040F000000518C0000521400083102103E00008DE
++:10410000244200883C09080091293D9124A800021E
++:104110003C05110000093C0000E8302500C51825C9
++:1041200024820008AC83000003E00008AC80000497
++:104130009347010B8F4A002C974F09083C18000E3B
++:104140000358482131EEFFFF000E41C0AF48002C5C
++:1041500097430908952C001A008040212403000190
++:10416000318BFFFFAC8B00008D2D001C00A058216F
++:1041700000C06021AC8D00048D24002030E7004099
++:10418000AD04000891220019304400031083004858
++:104190002885000214A00062240600021086005642
++:1041A00024190003109900660000000010E0003A96
++:1041B000000000003C07080094E73D8624E200016F
++:1041C000934F0934934709219525002A31EE00FFCA
++:1041D000000E488230ED00FF978700580009360036
++:1041E000000D1C003044FFFF00C310250044C02513
++:1041F00000A778213C19400003197025000F4C00DE
++:10420000AD090004AD0E0000934D09203C030006EB
++:1042100025090014000D360000C32025AD04000858
++:104220008F59092C24E5000130A27FFFAD19000C45
++:104230008F580930A782005825020028AD180010B9
++:104240008F4F0938AD0F0014AD2B00048F4E09407D
++:10425000AD2E0008934D09373C05080090A53D9010
++:104260008F4409488F46094031A700FF00EC182110
++:10427000008678230003C7000005CC0003196025E1
++:1042800031E8FFFC01885825AD2B000CAD20001053
++:1042900003E00008AF4A002C3C0D080095AD3D86B8
++:1042A0003C0E080095CE3D800A0004C901AE1021E5
++:1042B0003C05080094A53D8A3C06080094C63D8054
++:1042C0003C18080097183D7C952E002400A6782104
++:1042D00001F86823000E240025A2FFF200821825B1
++:1042E00024190800AD03000CAD190014AD00001036
++:1042F0000A0004C4250800189526002495250028E6
++:104300000006C40000057C00370E810035ED080072
++:10431000AD0E000CAD0D00100A0004C425080014F9
++:104320001480FFA200000000952400240004140063
++:1043300034430800AD03000C0A0004C42508001033
++:104340003C03080094633D8A3C05080094A53D8029
++:104350003C06080094C63D7C953900249538002819
++:10436000006520210086782300196C000018740075
++:1043700025E2FFEE01C2202535A3810024190800A3
++:10438000AD03000CAD040010AD190018AD00001411
++:104390000A0004C42508001C03E00008240201F4FC
++:1043A00027BDFFE8AFB00010AFBF00140E000060E3
++:1043B0000080802124050040AF4508148F83005001
++:1043C0008F84004C8F85005C0070182100641023DE
++:1043D00018400004AF830050AF6300548F66005450
++:1043E000AF86004C1200000C000000008F440074E7
++:1043F000936800813409FA002D07000710E00005DA
++:1044000000891021936C0081240B01F4018B50046E
++:1044100001441021AF62000C8F4E095C01C5682376
++:1044200019A000048FBF00148F4F095CAF8F005C90
++:104430008FBF00148FB000100A00006227BD001863
++:104440008F8400648F8300508F82004CAF640044DF
++:10445000AF63005003E00008AF6200543C038000EB
++:10446000346200708C43000027BDFFF8308700FFE6
++:1044700030A900FF30C800FFAF8300308F440178BF
++:104480000480FFFE3C028000345900708F38000029
++:10449000A3A700033C0708008CE700748FAC000062
++:1044A0003C0608008CC60070030378233C0E7FFF97
++:1044B00000EFC82135CDFFFF00005021018D2824D9
++:1044C00000CA1821000847C0032F202B00A8102580
++:1044D0000064C021AFA200003C010800AC390074A8
++:1044E0003C010800AC380070934F010AA3A0000201
++:1044F0003C0E80FFA3AF00018FAC0000312B007F8A
++:1045000035CDFFFF018D4824000B5600012A4025C0
++:10451000240730002406FF803C05100027BD00085A
++:10452000AF48014CAF470154A7400158A346015280
++:1045300003E00008AF45017827BDFFE8AFBF0014D6
++:10454000AFB000108F6500743C068000309000FF13
++:1045500000A620250E000060AF6400749363000580
++:10456000346200080E000062A362000502002021F0
++:104570008FBF00148FB00010240500052406000131
++:104580000A00057027BD001827BDFFE03C0380002E
++:10459000AFB00010AFBF0018AFB1001434620070AC
++:1045A0008C470000309000FF30A800FFAF8700303C
++:1045B0008F4401780480FFFE3C18800037110070A2
++:1045C0008E2F00003C0D08008DAD00743C0A0800E1
++:1045D0008D4A007001E7702301AE282100005821A8
++:1045E00000AE302B014B4821012638213C01080048
++:1045F000AC250074000088213C010800AC27007045
++:104600001100000F000000008F6200742619FFFFE8
++:104610003208007F0002FE0233E5007F150000062D
++:10462000332200FF2407FF800207202624A3FFFF78
++:1046300000838025320200FF0040802124111008F1
++:104640000E000060000000008F49081831250004AA
++:1046500014A0FFFD3218007F001878C000187140C8
++:1046600001CF682125AC0088AF4C0818274A098083
++:104670008D4B0020AF4B01448D460024AF460148CE
++:10468000A35001500E000062A740015802201021E3
++:104690008FBF00188FB100148FB0001003E0000826
++:1046A00027BD002027BDFFE8308400FFAFBF00100A
++:1046B0000E0005BB30A500FF8F8300508FBF001098
++:1046C000344500402404FF903C02100027BD001830
++:1046D000AF43014CA3440152AF45015403E000082D
++:1046E000AF4201789343093E306200081040000D4C
++:1046F0003C0901013528080AAC8800008F47007486
++:10470000AC8700043C06080090C63D9030C5001000
++:1047100050A00006AC8000088F6A0060AC8A0008D8
++:104720002484000C03E00008008010210A00062207
++:104730002484000C27BDFFE8AFBF0014AFB0001009
++:104740009346093F00A050210005288000853823AA
++:1047500030C200FF240300063C09080095293D866D
++:1047600024E8FFD824050004104300372406000283
++:104770009750093C3C0F020400063400320EFFFF44
++:1047800001CF6825AC8D0000934C093E318B002091
++:104790001160000800000000934309363C02010349
++:1047A000345F0300307900FF033FC0252405000873
++:1047B000AC98000493430934935909210005F88209
++:1047C000306200FF0002C082332F00FF00186E002D
++:1047D000000F740001AE6025018920253C094000CE
++:1047E00000898025ACF0FFD8934309378F4F0948E3
++:1047F0008F580940306200FF004AC821033F7021F2
++:1048000001F86023000E6F0001A650253185FFFCE2
++:10481000001F58800145482501683821AD09002056
++:104820000E00006024F00028240400040E00006242
++:10483000A364003F020010218FBF00148FB000104E
++:1048400003E0000827BD00180A0006352406001200
++:1048500027BDFFD024090010AFB60028AFB5002453
++:10486000AFB40020AFB10014AFB000103C0108009D
++:10487000A0293D90AFBF002CAFB3001CAFB2001811
++:1048800097480908309400FF3C02000E3107FFFFF3
++:10489000000731C0AF46002C974409089344010B30
++:1048A00030B500FF03428021308300300000B0218A
++:1048B0001060012500008821240C00043C01080040
++:1048C000A02C3D90934B093E000B5600000A2E038E
++:1048D00004A0016000000000AF400048934F010BAE
++:1048E00031EE002011C00006000000009358093E80
++:1048F00000189E0000139603064001890000000086
++:104900009344010B30830040106000038F930050EC
++:104910008F8200502453FFFF9347093E30E6000882
++:1049200014C0000224120003000090219619002CEC
++:1049300093580934934F0937A7990058330C00FF57
++:1049400031EE00FF024E6821000D5880016C5021AD
++:10495000015140213C010800A4283D869205001821
++:1049600030A900FF010918213C010800A4233D885B
++:104970009211001816200002000000000000000D37
++:104980003C010800A4233D8A3C010800A4203D808E
++:104990003C010800A4203D7C935F010B3063FFFFC6
++:1049A00033F00040120000022464000A2464000B6B
++:1049B0003091FFFF0E00009E022020219358010B32
++:1049C0003C08080095083D8A0040202100185982C3
++:1049D000316700010E00049A01072821934C010B56
++:1049E0008F4B002C974E09083C0F000E034F4021BF
++:1049F00031CDFFFF000D51C0AF4A002C974309088D
++:104A00009505001A004038212404000130A9FFFF59
++:104A1000AC4900008D06001C00404821318A00404E
++:104A2000AC4600048D020020ACE20008910300199E
++:104A300030630003106400EC28790002172001188D
++:104A4000241000021070010C241F0003107F011EAF
++:104A500000000000114000DE000000003C090800DA
++:104A600095293D8625220001935F0934934E092143
++:104A70009504002A33F900FF0019C08231CF00FFEE
++:104A8000978E005800184600000F6C00010D80251D
++:104A90003045FFFF02051025008E50213C034000E9
++:104AA00000433025000A6400ACEC0004ACE60000D2
++:104AB000935F09203C19000624EC0014001FC60077
++:104AC00003197825ACEF00088F48092C25CD00018B
++:104AD00031A57FFFACE8000C8F500930A785005846
++:104AE00024E80028ACF000108F4409380100802130
++:104AF000ACE40014AD9300048F530940AD9300085B
++:104B0000934A09373C19080093393D908F4309486F
++:104B10008F460940314200FF0052F82100667023A1
++:104B2000001F7F000019C40001F8282531CDFFFCCB
++:104B300000AD2025AD84000CAD800010AF4B002CE3
++:104B4000934B093E317300081260000D3C060101D1
++:104B500034CC080AACEC00288F530074AD13000469
++:104B60003C0B0800916B3D903167001050E0000352
++:104B7000AD0000088F6A0060AD0A00082510000C27
++:104B800012C0003D000000009343093F24160006B8
++:104B900024060004306200FF105600C924070002FA
++:104BA0009758093C3C0F0204330DFFFF01AF40252D
++:104BB000AE0800009345093E30A400201080000894
++:104BC00000000000935309363C0B0103357F0300BE
++:104BD000327900FF033F7025AE0E00042406000862
++:104BE000934F093493480921312AFFFF31ED00FF2B
++:104BF000000D1082310300FF0002B60000032C00FC
++:104C000002C56025018A9825001220803C094000D9
++:104C10000204502302695825AD4BFFD8935F093732
++:104C20008F4F09488F58094033F900FF0332702134
++:104C30000006B08201D668210007440001F828234D
++:104C4000000D1F000068302530A2FFFC2547FFD86B
++:104C500000C260250016808002074821ACEC0020CD
++:104C6000253000280E00006024120004A372003FCB
++:104C70000E000062000000009347010B30F200407C
++:104C8000124000053C1900FF8E180000372EFFFF70
++:104C9000030E3024AE0600000E0000C702202021C3
++:104CA0003C10080092103D90321100031220000FBA
++:104CB00002A028218F89005025330001AF930050B6
++:104CC000AF7300508F6B00540173F8231BE0000298
++:104CD000026020218F640054AF6400548F4C007434
++:104CE000258401F4AF64000C02A028210280202159
++:104CF000A76000680E0005BB3C1410008F850050B3
++:104D000034550006AF45014C8F8A00488FBF002CF8
++:104D10008FB3001C25560001AF9600488FB20018D3
++:104D2000A34A01528FB60028AF5501548FB1001429
++:104D3000AF5401788FB500248FB400208FB00010DD
++:104D400003E0000827BD00309358093E00189E007C
++:104D500000139603064200362411000293440923EF
++:104D6000308300021060FEDD8F8600608F8200506D
++:104D700014C2FEDA000000000E0000600000000017
++:104D80009369003F24070016312800FF1107000C2B
++:104D9000240500083C0C0800918C3D90358B0001E7
++:104DA0003C010800A02B3D90936A003F314300FF77
++:104DB00010650065240D000A106D005E2402000CD1
++:104DC0000E000062000000000A00069000000000D3
++:104DD0003C09080095293D863C0A0800954A3D801B
++:104DE0000A0006F3012A10213C09080095293D8A92
++:104DF0003C04080094843D803C06080094C63D7C39
++:104E000095030024012410210046F8230003CC0060
++:104E100027F0FFF20330C025240F0800ACF8000C87
++:104E2000ACEF0014ACE000100A0006EE24E7001816
++:104E30003C010800A0313D90935F093E241600011B
++:104E400033F900201720FEA5241100080A0006905F
++:104E5000241100048F6E00848F4D094011A0FE9E26
++:104E6000AF8E0050240F00143C010800A02F3D908D
++:104E70000A00068F00000000950E0024950D002802
++:104E8000000E6400000D2C003589810034A6080056
++:104E9000ACE9000CACE600100A0006EE24E70014B2
++:104EA0001460FEEC000000009502002400021C00CB
++:104EB00034640800ACE4000C0A0006EE24E700109D
++:104EC0000A000741240700123C02080094423D8A70
++:104ED0003C06080094C63D803C03080094633D7C7A
++:104EE00095100024951900280046F82103E3C023FB
++:104EF00000106C0000197400270FFFEE01CF282569
++:104F000035AC8100ACEC000CACE5001024070800C7
++:104F1000AD2700182527001C0A0006EEAD2000145E
++:104F20008F7F004CAF7F00548F7900540A000699A0
++:104F3000AF790050A362003F0E0000620000000045
++:104F40000A00069000000000240200140A0008274E
++:104F5000A362003F27BDFFE8308400FFAFBF001011
++:104F60000E0005BB30A500FF9378007E9379007F8B
++:104F7000936E00809368007A332F00FF001866005C
++:104F8000000F6C0031CB00FF018D4825000B520053
++:104F90008FBF0010012A3825310600FF344470000D
++:104FA00000E628252402FF813C03100027BD0018DD
++:104FB000AF45014CAF440154A342015203E0000845
++:104FC000AF43017827BDFFD8AFB20018AFB10014CE
++:104FD000AFB00010AFBF0020AFB3001C9342010977
++:104FE000308600FF30B000FF000618C23204000215
++:104FF0003071000114800005305200FF93670005F6
++:1050000030E5000810A0000D30C80010024020213B
++:105010000E0005A702202821240400018FBF0020D4
++:105020008FB3001C8FB200188FB100148FB0001026
++:105030000080102103E0000827BD00281500003281
++:105040000000000093430109000028213062007F26
++:10505000000220C00002F94003E49821267900886C
++:10506000033B98218E7800248E6F0008130F0046B2
++:10507000000000008F640084241800020004FD82F8
++:1050800033F900031338007C0000000093660083AE
++:10509000934A0109514600043205007C10A00060CB
++:1050A000000000003205007C14A0005302402021C3
++:1050B00016200006320400018E7F00248F5901045F
++:1050C00017F9FFD600002021320400011080000AE9
++:1050D000024020218F4209408F9300641053000644
++:1050E000000000000E00066D022028218F430940B9
++:1050F000AF630044024020210E0006020220282156
++:105100000A000860240400013C0908008D2900649D
++:10511000252600013C010800AC26006416000012A0
++:10512000000000008F6D00843C0E00C001AE6024C2
++:1051300015800005024020210E00082E02202821A3
++:105140000A00086024040001240500040E00057014
++:1051500024060001024020210E00082E02202821F2
++:105160000A000860240400010E000041240400012C
++:10517000936B007D020B50250E000062A36A007D38
++:105180000A0008A38F6D00848F6600748F480104A5
++:105190008E67002400064E021507FFB63126007FF9
++:1051A000936B008326440001308A007F1146004340
++:1051B000316300FF5464FFB08F6400842645000112
++:1051C00030B1007F30A200FF122600042405000148
++:1051D000004090210A00087624110001240FFF806E
++:1051E000024F702401CF9026324200FF00409021F0
++:1051F0000A000876241100010E00066D0220282105
++:10520000321800301300FFAA321000820240202121
++:105210000E0005A7022028210A00086024040001CE
++:105220008F6E00743C0F80002405000301CF902591
++:10523000AF72007493710083240600010E000570A4
++:10524000322400FF0E00004124040001936D007D14
++:10525000020D60250E000062A36C007D3C0B08006F
++:105260008D6B0054257000013C010800AC300054E7
++:105270000A000860240400018F6800743C09800063
++:105280002405000401093825AF6700749363008387
++:10529000240600010E000570306400FF0E0000417E
++:1052A000240400019362007D020298250E00006232
++:1052B000A373007D0A00086024040001324D0080C1
++:1052C00039AC0080546CFF6C8F6400840A0008C9FC
++:1052D0002645000127BDFFC83C0A0008AFBF0030CB
++:1052E000AFB5002CAFB40028AFB30024AFB200209C
++:1052F000AFB1001CAFB00018034AD8212409004008
++:10530000AF490814AF4008108F4209448F43095039
++:105310008F4609548F47095C8F48094C9344010814
++:105320009345010BAF820064308400FF30A500FF7D
++:10533000AF830050AF86004CAF87005C0E00084A78
++:10534000AF8800601440017D8FBF0030A760006807
++:10535000934D0900240B00503C15080026B53D482C
++:1053600031AC00FF3C12080026523D58118B00035F
++:10537000000000000000A8210000902193510109C5
++:105380008F9F005024040010322E007F000E68C052
++:10539000000E6140018D282124B40088AF54081804
++:1053A0008F4901048F4A09A43C0B000E034BC02116
++:1053B000012A10233C010800AC223D6C8F430958A0
++:1053C0003C010800A0243D9097470908007F302346
++:1053D0003C010800AC263D7030E8FFFF0008C9C062
++:1053E0003C010800AC3F3D94AF59002C974209089E
++:1053F0009710002C8EB10000930F001803749821B1
++:10540000A7900058AF9300440220F80931F000FF44
++:10541000304E000215C001B2304F000111E0014FC3
++:10542000000000009343093E3066000814C00002EB
++:10543000241400030000A0218F5809A424130001A4
++:105440003C010800AC383D98934F0934935109371B
++:1054500031EC00FF322E00FF028E6821000D288003
++:1054600000AC5021015058213C010800A42B3D887C
++:105470003C010800A42A3D8693490934312200FFEB
++:1054800002022021249000103C010800A4303D8439
++:10549000240700068F9F00503C010800AC273D8C7C
++:1054A0008F88005C8F59095800008021011F282334
++:1054B00004A00149033F20230480014700A4302BAE
++:1054C00010C00149000000003C010800AC253D70FF
++:1054D0008E4200000040F809000000003043000246
++:1054E000146000F80040882130440001548000100E
++:1054F0008E4200043C0908008D293D743C0AC0001E
++:10550000012A8025AF500E008F45000030AB000807
++:105510001160FFFD00000000974D0E0824100001EF
++:10552000A78D003C8F4C0E04AF8C00348E420004DB
++:105530000040F8090000000002228825322E0002F7
++:1055400015C00180000000003C09080095293D7C41
++:105550003C06080094C63D883C0A0800954A3D7EFA
++:105560003C1908008F393D74012660213C18080061
++:105570008F183D983C03080094633D92018A2021D6
++:105580008F4E09400329F821248F000203E32821CC
++:10559000031968213C010800A42C3D8AAF8E0064E9
++:1055A0003C010800AC2D3D983C010800A4253D803D
++:1055B0000E00009E31E4FFFF8F870048004020214D
++:1055C0003C010800A0273D918E42000824E800011C
++:1055D000AF8800480040F809000000009344010B28
++:1055E0008F4C002C974A09083C0B000E034B4021BE
++:1055F0003149FFFF000919C08F8B0050AF43002CC9
++:10560000974309089506001A00403821308A004067
++:1056100030DFFFFFAC5F00008D19001C0040482107
++:10562000AC5900048D180020AC580008910F0019E7
++:1056300031E30003107300F0000000002862000254
++:105640001440010924050002106500FD240D00032B
++:10565000106D010D00000000114000D90000000095
++:105660003C0A0800954A3D8625420001934D0934C5
++:1056700093580921950E002A31A300FF00032082D0
++:10568000331F00FF9798005800047E00001FCC00D5
++:1056900001F940253049FFFF0109102501D83021CB
++:1056A0003C0540000045502500066C00ACED0004B0
++:1056B000ACEA0000934309203C04000624ED0014EA
++:1056C0000003FE0003E4C825ACF900088F49092C4B
++:1056D000270F000131EE7FFFACE9000C8F48093045
++:1056E000A78E005824E90028ACE800108F4509383F
++:1056F00001204021ACE50014ADAB00048F4209400D
++:10570000ADA20008934B09373C1F080093FF3D9062
++:105710008F4309488F4A0940316600FF00D4202199
++:10572000006A78230004C700001FCC000319282555
++:1057300031EEFFFC00AE1025ADA2000CADA00010B4
++:10574000AF4C002C934C093E318B00085160000F88
++:105750008E58000C3C06010134CA080AACEA002845
++:105760008F4B0074AD2B00043C0C0800918C3D90D5
++:105770003187001050E00003AD2000088F62006008
++:10578000AD2200082528000C8E58000C0300F809F3
++:10579000010020213C19080097393D8A3C1F080070
++:1057A00097FF3D7E033F782125E900020E0000C7E8
++:1057B0003124FFFF3C0E08008DCE3D6C3C080800F4
++:1057C0008D083D7401C828233C010800AC253D6CC0
++:1057D00014A00006000000003C0308008C633D8C10
++:1057E000346400403C010800AC243D8C1200007081
++:1057F0008F8C00448F470E108F900044AE0700201E
++:105800008F4D0E18AE0D00243C10080096103D8000
++:105810000E0000600000000024020040AF420814A7
++:105820008F8600508F8A004C00D01821006A5823C0
++:1058300019600004AF830050AF6300548F650054BB
++:10584000AF85004C1200000C000000008F44007473
++:10585000936800813409FA002D0E000711C000057D
++:1058600000891821937F0081241901F403F9780439
++:1058700001E41821AF63000C8F44095C8F83005C46
++:105880000083C0231B000003000000008F50095C50
++:10589000AF90005C0E000062000000008F8C005092
++:1058A0008E4700103C010800AC2C3D9400E0F80944
++:1058B000000000003C0D08008DAD3D6C55A0FEF5CC
++:1058C000240700068F450024975909088F8B006430
++:1058D0008F9400503C0F001F978200588F86005411
++:1058E0008F93004C3328FFFF35E9FF8000A9502437
++:1058F000000871C032320100AF4E0024A4C2002C57
++:10590000AF4A0024AF6B0044AF740050AF73005433
++:105910001640008032380010570000868EA4000424
++:10592000322300405460001B8EB100088EB0000C82
++:105930000200F809000000008FBF00308FB5002C76
++:105940008FB400288FB300248FB200208FB1001CC9
++:105950008FB0001803E0000827BD00389347010905
++:105960008F8800380007FE0003E8C825AF59008083
++:105970008F5809A08F5309A4AFB80010AF580E1468
++:105980008FB40010AF540E10AF530E1C0A00096202
++:10599000AF530E180220F809000000008EB0000C72
++:1059A0000200F809000000000A000AA88FBF0030BA
++:1059B000A5800020A59300220A000A5BAD93002475
++:1059C0003C09080095293D863C06080094C63D80A8
++:1059D0000A0009F4012610213C010800AC203D70AA
++:1059E0000A00098E8E4200003C010800AC243D7084
++:1059F0000A00098E8E4200003C03080094633D8A31
++:105A00003C04080094843D803C1F080097FF3D7CC7
++:105A1000951800240064C821033F782300186C0007
++:105A200025EEFFF201AE2825AC45000C240208004B
++:105A3000ACE20014ACE000100A0009EF24E7001803
++:105A400095060024950900280006240000091C0082
++:105A5000349F810034790800ACFF000CACF90010D1
++:105A60000A0009EF24E700141460FEFB00000000A8
++:105A70009518002400187C0035EE0800ACEE000CF0
++:105A80000A0009EF24E700103C07080094E73D8076
++:105A90003C04080094843D8A3C03080094633D7CE8
++:105AA00095190024951800280087F82103E378232E
++:105AB0002407080000192C0000186C0025EEFFEEEA
++:105AC00001AE302534A28100AD2700182527001C27
++:105AD000AD22000CAD2600100A0009EFAD20001425
++:105AE00093520109000028210E000602324400FFF3
++:105AF0008FBF00308FB5002C8FB400288FB30024E7
++:105B00008FB200208FB1001C8FB0001803E0000896
++:105B100027BD0038935F010933E400FF0E00066DD6
++:105B200000002821323800105300FF7E322300404D
++:105B30008EA400040080F809000000000A000AA2F8
++:105B4000322300401200FF5F000000008F540E144B
++:105B50008F920044AE5400208F530E1C0A000A8A14
++:105B6000AE5300248F82001C008040213C040100C1
++:105B70009047008530E3002010600009000000001D
++:105B80003C0708008CE73D948F83001800E3202336
++:105B9000048000089389000414E30003010020211D
++:105BA00003E00008008010213C04010003E000082D
++:105BB000008010211120000B006738238F8C0020FB
++:105BC00024090034918B00BC316A0002514000016D
++:105BD0002409003000E9682B15A0FFF10100202105
++:105BE00000E938232419FFFC00B9C02400F9782407
++:105BF00000F8702B15C0FFEA01E8202130C2000335
++:105C00000002182314C00012306900030000302184
++:105C100000A9702101C6682100ED602B1180FFE012
++:105C20003C0401002D2F00010006482B01053821FE
++:105C300001E9302414C0FFDA24E4FFFC2419FFFC3E
++:105C400000B9C0240308202103E0000800801021CF
++:105C50008F8B002024060004916A00BC31440004AC
++:105C60001480FFEC00A970210A000B5E00003021B7
++:105C700027BDFFE8AFBF00108F460100934A01091E
++:105C80003C1F08008FFF00902407FF80314F00FF6A
++:105C900031E8007F0008614003E6C821032CC021E1
++:105CA00027090120012770243C010800A02F3DD0C6
++:105CB000AF4E080C3C0D08008DAD00903C040080F8
++:105CC0003482000301A65821016C182124650120AB
++:105CD00030AA007801424025AF48081C3C1F08004C
++:105CE0008FFF00908F88004003E6C0213319000722
++:105CF00003074824033A7821AF49002825E909C061
++:105D0000952E00023C0D08008DAD008C3C0A080069
++:105D10008D4A009031CC3FFF01A61821000C59801C
++:105D2000006B282100A72024AF44002C95220002FC
++:105D30003C1F08008FFF008C9107008530593FFF02
++:105D400003E678210019C1800146702101F868211D
++:105D500031CC007F31AB007F019A2821017A50219C
++:105D60003C03000C3C04000E00A328210144102138
++:105D700030E6002027470980AF82002CAF88001C46
++:105D8000AF890024AF85002010C00006AF8700282F
++:105D90008D0200508CA4010C0044302318C0007701
++:105DA00000000000910C0085240DFFDF018D3824D8
++:105DB000A10700858F8B001C8F8900248F87002806
++:105DC0008D65004CAF850018912F000D31EE00203D
++:105DD00011C000170000000024090001A38900047D
++:105DE000AF80000C8CE400248F85000C240A00088E
++:105DF000AF800008AF8000103C010800A42A3D7E5F
++:105E00003C010800A4203D920E000B32000030211E
++:105E10008F8500248FBF0010AF82001490A8000D62
++:105E200027BD00180008394203E0000830E20001F5
++:105E3000913F00022418000133F900FF001921826C
++:105E400010980039240800021088005B8F86002C0F
++:105E50008CE5002414A0001B8F9F002091220000DD
++:105E6000240A00053046003F10CA00472404000100
++:105E70008F860008A3840004AF860010AF86000C54
++:105E80008CE400248F85000C240A00083C010800E3
++:105E9000A42A3D7E3C010800A4203D920E000B3256
++:105EA000000000008F8500248FBF0010AF82001417
++:105EB00090A8000D27BD00180008394203E0000833
++:105EC00030E200018CF800088CF900248FEE00C449
++:105ED000A38000048CE40024AF8E000C8F85000C9E
++:105EE0008F86000803197823240A0008AF8F00105A
++:105EF0003C010800A42A3D7E3C010800A4203D92FC
++:105F00000E000B32000000008F8500248FBF0010B0
++:105F1000AF82001490A8000D27BD00180008394278
++:105F200003E0000830E20001912300003062003FEE
++:105F3000104400278F8500208CE400241480002169
++:105F4000000000008D2E00183C187FFF8F85002078
++:105F5000370FFFFF01CF1824AF8300088F9F000881
++:105F60008CA8008403E8C82B1720000203E020213E
++:105F70008CA400840A000BEDAF8400088CA3010CF4
++:105F80000A000BCBAF8300188D2C00188F860008F9
++:105F90003C0D7FFF8F89002035A3FFFF018358242C
++:105FA00024040001AF8B0010AD2000CCA3840004BA
++:105FB0000A000BF9AF86000C8CCA00140A000BED26
++:105FC000AF8A00088CA300C80A000C30AF83000819
++:105FD0008F84002C8CAC00648C8D0014018D582BA8
++:105FE00011600004000000008CA200640A000C3064
++:105FF000AF8200088C8200140A000C30AF820008C7
++:106000008F85000C27BDFFE0AFBF0018AFB10014B3
++:1060100014A00007AFB000108F86002424020005F2
++:1060200090C400003083003F106200B68F840020CF
++:106030008F91000800A080218F8C00283C0508006B
++:106040008CA53D708D8B000431663FFF00C5502B41
++:106050005540000100C02821938D000411A0007359
++:1060600000B0F82B8F98002024040034930F00BC5C
++:1060700031EE000251C000012404003000A4C82BFE
++:10608000172000D10000000000A4282300B0F82B46
++:106090003C010800A4243D7C17E000680200202198
++:1060A0003C0308008C633D6C0083102B54400001BE
++:1060B000008018218F8800243C010800AC233D7427
++:1060C000000048219104000D308300205060000141
++:1060D0008F490E188F8300140123382B10E00059CC
++:1060E000000000003C0408008C843D7400895821A5
++:1060F000006B502B114000560090602B006930233C
++:1061000000C020213C010800AC263D7412000003B1
++:10611000241FFFFC1090008A32270003009FC82430
++:106120003C010800AC393D743C010800A4203D92BC
++:106130008F84000C120400078F830020AF910008A9
++:10614000020020218C7100CCAF90000C26300001A1
++:10615000AC7000CC3C0208008C423D748F8A001069
++:10616000240700180082202301422823AF84000C5A
++:1061700010800002AF850010240700108F86001CDD
++:106180003C010800A0273D902407004090CC0085EA
++:10619000318B00C0116700408F8D001414A00015D2
++:1061A00000002021934A01098F420974314500FF04
++:1061B0000002260224A300013090007F3071007F8E
++:1061C0001230007A2407FF80A0C300833C09080036
++:1061D0008D293D8C8F880024240D0002352C000869
++:1061E0003C010800A02D3DD13C010800AC2C3D8CA9
++:1061F00024040010910E000D31C6002010C00005CF
++:1062000000801821240800013C010800AC283D74DE
++:10621000348300018FBF00188FB100148FB00010BD
++:106220000060102103E0000827BD00203C010800A9
++:10623000A4203D7C13E0FF9A020020210A000C817B
++:1062400000A020213C0408008C843D740090602B49
++:106250001180FFAE000000003C0F080095EF3D7C70
++:1062600001E4702101C6682B11A000072C820004F4
++:106270003C1F60008FF954043338003F1700FFE5DE
++:10628000240300422C8200041040FFA0240300429B
++:106290000A000CDF8FBF0018152DFFC000000000A2
++:1062A0008CDF00743C0380002405FF8003E3C825D5
++:1062B000ACD9007490D80085240E0004240400108A
++:1062C000330F003F01E54025A0C800858F880024DA
++:1062D0003C010800A02E3DD1240300019106000DD1
++:1062E00030C9002015200003000000003C03080016
++:1062F0008C633D743C010800AC233D6C0A000CD655
++:10630000000000008F8700108C88008400E8282B94
++:1063100014A0000200E088218C910084240900016F
++:10632000A38900048F440E18022028210E000B328E
++:1063300002203021022080210A000C67AF82001465
++:1063400000071823306600033C010800A4263D9294
++:10635000122000058F8C0020918B00BC316A000454
++:106360001540001524CD00043C0F080095EF3D9228
++:1063700001E4702100AE302B50C0FF6E8F84000C02
++:106380002C85000514A0FFA32403004230980003CD
++:1063900017000002009818232483FFFC3C0108002A
++:1063A000AC233D740A000CA30000000000A7582491
++:1063B0000A000CCB016718263C010800A42D3D9271
++:1063C0000A000D33000000003C010800AC203D74C1
++:1063D0000A000CDE240300428F83001014600007C3
++:1063E000000010218F88002424050005910600007C
++:1063F00030C400FF108500030000000003E0000827
++:1064000000000000910A0018314900FF000939C25C
++:1064100014E0FFFA8F85001C3C04080094843D7C46
++:106420003C0308008C633D943C1908008F393D748F
++:106430003C0F080095EF3D920064C0218CAD0054E4
++:106440000319702101CF6021018D58231960001DAF
++:1064500000000000910E001C8F8C002C974B0E103A
++:1064600031CD00FF8D850004016D30238D88000043
++:1064700030CEFFFF000E510000AAC82100003821D5
++:1064800001072021032A182B0083C021AD990004A5
++:10649000AD980000918F000A01CF6821A18D000AFC
++:1064A0008F88002C974B0E12A50B0008950A003818
++:1064B00025490001A50900389107000D34E60008C0
++:1064C000A106000D03E000080000000027BDFFE06A
++:1064D000938700048F8F00248FAD00143C0E7FFF44
++:1064E0008F89000C35C8FFFFAFBF001CAFB000188C
++:1064F00001A8182491EA000D000717C03C1FBFFF38
++:10650000006258252D2E00018F90001837F9FFFFEB
++:106510003C1808008F183D943C0F080095EF3D8A09
++:1065200001796824000E47803C07EFFF3C05F0FF2F
++:1065300001A818253149002034E2FFFF34ACFFFFE9
++:106540000310582327A500102406000225EA0002A4
++:1065500000621824008080211520000200004021E4
++:106560008F480E1CA7AA0012056000372407000000
++:1065700030FF00FF001FCF008F8B001C00793825F3
++:10658000AFA70014916F00853C08080091083D9169
++:106590003C18DFFF31EE00C0370AFFFF000E182B5A
++:1065A0003C1F080097FF3D8400EA6824A3A800115F
++:1065B0000003174001A248258FB90010AFA90014AD
++:1065C0003C0A0800914A3D93A7BF00168FA800140B
++:1065D000032CC0243C0B01003C0F0FFF030B1825BC
++:1065E0003147000335EEFFFF010C68240007160059
++:1065F000006EF8243C09700001A2C82503E9582563
++:10660000AFB90014AFAB00100E000076A3A00015C8
++:106610008F8C0024260200089186000D30C40020D3
++:10662000108000068FBF001C3C05080094A53D802B
++:1066300024B0FFFF3C010800A4303D808FB000185B
++:1066400003E0000827BD00208F9800140118502B8C
++:106650005540FFC7240700010A000DB630FF00FFB8
++:106660009382000427BDFFE0AFBF00181040000F69
++:10667000008050218F880024240B00058F8900089A
++:10668000910700008F8400200100282130E3003FA3
++:106690008F86002C106B000800003821AFA9001075
++:1066A0000E00040EAFAA0014A38000048FBF0018D0
++:1066B00003E0000827BD00208D1900183C0F0800DA
++:1066C0008DEF3D748F9800103C027FFF8D08001401
++:1066D000345FFFFF033F682401F8702101AE60239F
++:1066E00001883821AFA900100E00040EAFAA0014D3
++:1066F0000A000E04A38000048F8700243C050800D4
++:1067000094A53D923C0208008C423D8C90E6000D21
++:106710000005240030C300201060002C00444025F8
++:106720008F85001C00006021240B000190A30085D0
++:1067300000004821240A00013C0F800035EE007063
++:106740008DC70000AF8700308F5801780700FFFE2B
++:106750003C038000347900708F3800003C0508004D
++:106760008CA500743C0D08008DAD007003077823E4
++:1067700000AF38210000102100EF302B01A22021B2
++:10678000008618213C010800AC2700743C01080079
++:10679000AC230070AF4B01483C1908008F393D9481
++:1067A000A7490144A74A0146AF59014C3C0B0800D8
++:1067B000916B3D91A34B0152AF4801543C0810002E
++:1067C000A74C015803E00008AF4801788F4B0E1C1E
++:1067D0003C0A08008D4A3D7497490E16974D0E14D9
++:1067E00001456021312AFFFF0A000E2731A9FFFF72
++:1067F0008F8300249064000D308200201040002917
++:10680000000000000000482100005021000040214D
++:106810003C07800034EB00708D670000AF870030CC
++:106820008F4C01780580FFFE3C0D800035AC007078
++:106830008D8B00003C0508008CA500743C0408000A
++:106840008C8400700167302300A67821000010219D
++:1068500001E6C82B0082C021031970213C01080009
++:10686000AC2F00743C010800AC2E0070AF49014809
++:106870003C0D08008DAD3D94A7480144240900401B
++:10688000A74A01463C081000240AFF91AF4D014C75
++:10689000A34A0152AF490154A740015803E0000840
++:1068A000AF4801788F490E1897460E1297450E1083
++:1068B00030CAFFFF0A000E5D30A8FFFF8F8300245F
++:1068C00027BDFFF89064000D308200201040003A90
++:1068D00000000000240B000100004821240A0001F0
++:1068E0003C088000350700708CE30000AF83003067
++:1068F0008F4C01780580FFFE3C0E80003C040800B0
++:1069000090843DD035C700708CEC00003C05080039
++:106910008CA50074A3A400033C1908008F390070F3
++:106920008FAD00000183302300A638210000102124
++:106930000322782100E6C02B01F8602101AE40253A
++:10694000AFA800003C010800AC2700743C0108001F
++:10695000AC2C00709346010A3C04080090843DD1A1
++:10696000A3A00002A3A600018FA300003C0580FFA6
++:106970003099007F34A2FFFF006278240019C6001E
++:1069800001F87025240D3000AF4E014C27BD0008E2
++:10699000AF4D0154A7400158AF4B0148A7490144EE
++:1069A000A74A01463C091000240AFF80A34A01526D
++:1069B00003E00008AF4901788F4B0E1897460E127E
++:1069C00097450E1030CAFFFF0A000E9130A9FFFF55
++:1069D0008F85001C2402008090A40085308300C0B5
++:1069E000106200058F8600208F8800088F87000CBA
++:1069F000ACC800C8ACC700C403E000080000000039
++:106A00003C0A0800254A39543C09080025293A2047
++:106A10003C08080025082DD43C07080024E73B3437
++:106A20003C06080024C637C43C05080024A5353CB4
++:106A30003C040800248431643C0308002463385C6F
++:106A40003C020800244236303C010800AC2A3D508C
++:106A50003C010800AC293D4C3C010800AC283D48F5
++:106A60003C010800AC273D543C010800AC263D64C5
++:106A70003C010800AC253D5C3C010800AC243D58BD
++:106A80003C010800AC233D683C010800AC223D609D
++:086A900003E000080000000013
++:00000001FF
+diff --git a/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
+new file mode 100644
+index 0000000..43d7c4f
+--- /dev/null
++++ b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
+@@ -0,0 +1,6496 @@
++:10000000080001180800000000005594000000C816
++:1000100000000000000000000000000008005594EF
++:10002000000000380000565C080000A00800000036
++:100030000000574400005694080059200000008436
++:100040000000ADD808005744000001C00000AE5CBD
++:100050000800321008000000000092580000B01C98
++:10006000000000000000000000000000080092589E
++:100070000000033C000142740800049008000400E2
++:10008000000012FC000145B000000000000000006C
++:1000900000000000080016FC00000004000158AC3D
++:1000A000080000A80800000000003D00000158B052
++:1000B00000000000000000000000000008003D00FB
++:1000C00000000030000195B00A000046000000006A
++:1000D000000000000000000D636F6D362E322E31DF
++:1000E00062000000060201020000000000000003A0
++:1000F000000000C800000032000000030000000003
++:1001000000000000000000000000000000000000EF
++:1001100000000010000001360000EA600000000549
++:1001200000000000000000000000000000000008C7
++:1001300000000000000000000000000000000000BF
++:1001400000000000000000000000000000000000AF
++:10015000000000000000000000000000000000009F
++:10016000000000020000000000000000000000008D
++:10017000000000000000000000000000000000007F
++:10018000000000000000000000000010000000005F
++:10019000000000000000000000000000000000005F
++:1001A000000000000000000000000000000000004F
++:1001B000000000000000000000000000000000003F
++:1001C000000000000000000000000000000000002F
++:1001D000000000000000000000000000000000001F
++:1001E0000000000010000003000000000000000DEF
++:1001F0000000000D3C020800244256083C030800A1
++:1002000024635754AC4000000043202B1480FFFDB2
++:10021000244200043C1D080037BD9FFC03A0F021D0
++:100220003C100800261001183C1C0800279C5608AA
++:100230000E000256000000000000000D27BDFFB4B4
++:10024000AFA10000AFA20004AFA30008AFA4000C50
++:10025000AFA50010AFA60014AFA70018AFA8001CF0
++:10026000AFA90020AFAA0024AFAB0028AFAC002C90
++:10027000AFAD0030AFAE0034AFAF0038AFB8003C28
++:10028000AFB90040AFBC0044AFBF00480E001544FA
++:10029000000000008FBF00488FBC00448FB90040B1
++:1002A0008FB8003C8FAF00388FAE00348FAD003078
++:1002B0008FAC002C8FAB00288FAA00248FA90020C0
++:1002C0008FA8001C8FA700188FA600148FA5001000
++:1002D0008FA4000C8FA300088FA200048FA1000040
++:1002E00027BD004C3C1B60108F7A5030377B502864
++:1002F00003400008AF7A00008F82002427BDFFE092
++:10030000AFB00010AFBF0018AFB100148C42000CAA
++:100310003C1080008E110100104000348FBF001887
++:100320000E000D84000000008F85002024047FFF54
++:100330000091202BACB100008E030104960201084D
++:1003400000031C003042FFFF00621825ACA300042C
++:100350009202010A96030114304200FF3063FFFF4E
++:100360000002140000431025ACA200089603010C03
++:100370009602010E00031C003042FFFF00621825A8
++:10038000ACA3000C960301109602011200031C009E
++:100390003042FFFF00621825ACA300108E02011846
++:1003A000ACA200148E02011CACA20018148000083C
++:1003B0008F820024978200003C0420050044182509
++:1003C00024420001ACA3001C0A0000C6A782000062
++:1003D0003C0340189442001E00431025ACA2001CB0
++:1003E0000E000DB8240400018FBF00188FB1001457
++:1003F0008FB000100000102103E0000827BD00208E
++:100400003C0780008CE202B834E50100044100089A
++:10041000240300013C0208008C42006024420001D9
++:100420003C010800AC22006003E0000800601021DD
++:100430003C0208008C42005C8CA4002094A30016AF
++:100440008CA6000494A5000E24420001ACE40280B6
++:100450002463FFFC3C010800AC22005C3C0210005D
++:10046000A4E30284A4E5028600001821ACE6028819
++:10047000ACE202B803E000080060102127BDFFE0F5
++:100480003C028000AFB0001034420100AFBF001C3E
++:10049000AFB20018AFB100148C43000094450008BF
++:1004A0002462FE002C42038110400003000381C23D
++:1004B0000A00010226100004240201001462000553
++:1004C0003C1180003C02800890420004305000FF44
++:1004D0003C11800036320100964300143202000FB6
++:1004E00000021500004310253C0308008C63004403
++:1004F00030A40004AE220080246300013C01080007
++:10050000AC2300441080000730A200028FBF001C03
++:100510008FB200188FB100148FB000100A0000CE07
++:1005200027BD00201040002D0000182130A20080BF
++:1005300010400005362200708E44001C0E000C672F
++:10054000240500A0362200708C4400008F82000C2D
++:10055000008210232C43012C10600004AF82001095
++:10056000240300010A000145AF84000C8E42000400
++:100570003C036020AF84000CAC6200143C02080015
++:100580008C42005850400015000018218C62000475
++:10059000240301FE304203FF144300100000182121
++:1005A0002E020004104000032E0200080A00014041
++:1005B0000000802114400003000000000A000140F8
++:1005C0002610FFF90000000D2402000202021004B0
++:1005D0003C036000AC626914000018218FBF001C4E
++:1005E0008FB200188FB100148FB00010006010217E
++:1005F00003E0000827BD00203C0480008C8301003C
++:1006000024020100506200033C0280080000000D3B
++:100610003C02800890430004000010213063000F6A
++:1006200000031D0003E00008AC8300800004188074
++:100630002782FF9C00621821000410C00044102390
++:100640008C640000000210C03C030800246356E4E0
++:10065000004310213C038000AC64009003E00008DC
++:10066000AF8200243C0208008C42011410400019A3
++:100670003084400030A2007F000231C03C02020002
++:100680001080001400A218253C026020AC43001426
++:100690003C0408008C8456B83C0308008C630110AD
++:1006A0003C02800024050900AC4500200086202182
++:1006B000246300013C028008AC4400643C01080053
++:1006C000AC2301103C010800AC2456B803E000083C
++:1006D000000000003C02602003E00008AC4500146C
++:1006E00003E000080000102103E0000800001021D2
++:1006F00030A2000810400008240201003C0208005B
++:100700008C42010C244200013C010800AC22010C87
++:1007100003E0000800000000148200080000000050
++:100720003C0208008C4200FC244200013C0108000D
++:10073000AC2200FC0A0001A330A200203C02080009
++:100740008C420084244200013C010800AC22008459
++:1007500030A200201040000830A200103C02080027
++:100760008C420108244200013C010800AC2201082F
++:1007700003E0000800000000104000080000000036
++:100780003C0208008C420104244200013C010800A4
++:10079000AC22010403E00008000000003C02080055
++:1007A0008C420100244200013C010800AC220100FF
++:1007B00003E000080000000027BDFFE0AFB1001417
++:1007C0003C118000AFB20018AFBF001CAFB00010EA
++:1007D0003632010096500008320200041040000733
++:1007E000320300028FBF001C8FB200188FB10014BB
++:1007F0008FB000100A0000CE27BD00201060000B53
++:10080000020028218E2401000E00018A0000000051
++:100810003202008010400003240500A10E000C6786
++:100820008E44001C0A0001E3240200018E2301040F
++:100830008F82000810430006020028218E24010048
++:100840000E00018A000000008E220104AF82000821
++:10085000000010218FBF001C8FB200188FB1001450
++:100860008FB0001003E0000827BD00202C82000498
++:1008700014400002000018212483FFFD240200021E
++:10088000006210043C03600003E00008AC626914DD
++:1008900027BDFFE0AFBF001CAFB20018AFB100141E
++:1008A000AFB000103C048000948201083043700017
++:1008B000240220001062000A2862200154400052E5
++:1008C0008FBF001C24024000106200482402600018
++:1008D0001062004A8FBF001C0A0002518FB200183C
++:1008E00034820100904300098C5000189451000C90
++:1008F000240200091062001C0000902128620009F7
++:10090000144000218F8200242402000A5062001249
++:10091000323100FF2402000B1062000F00000000C3
++:100920002402000C146200188F8200243C0208008C
++:100930008C4256B824030900AC83002000501021DB
++:100940003C038008AC6200643C010800AC2256B84D
++:100950000A0002508FBF001C0E0001E900102602A1
++:100960000A0002308F8200240E0001E900102602E6
++:100970003C0380089462001A8C72000C3042FFFF26
++:10098000020280258F8200248C42000C5040001E01
++:100990008FBF001C0E000D84000000003C02800090
++:1009A00034420100944300088F82002400031C009D
++:1009B0009444001E8F82002000641825AC50000073
++:1009C00024040001AC510004AC520008AC40000CFF
++:1009D000AC400010AC400014AC4000180E000DB844
++:1009E000AC43001C0A0002508FBF001C0E000440E4
++:1009F000000000000A0002508FBF001C0E000C9F78
++:100A0000000000008FBF001C8FB200188FB10014CF
++:100A10008FB000100000102103E0000827BD002067
++:100A200027BDFFD8AFB400203C036010AFBF002447
++:100A3000AFB3001CAFB20018AFB10014AFB00010DC
++:100A40008C6450002402FF7F3C1408002694563822
++:100A5000008220243484380CAC6450003C028000B6
++:100A6000240300370E0014B0AC4300083C07080014
++:100A700024E70618028010212404001D2484FFFFAF
++:100A8000AC4700000481FFFD244200043C02080042
++:100A9000244207C83C010800AC2256403C02080032
++:100AA000244202303C030800246306203C04080072
++:100AB000248403B43C05080024A506F03C06080085
++:100AC00024C62C9C3C010800AC2256803C02080045
++:100AD000244205303C010800AC2756843C01080044
++:100AE000AC2656943C010800AC23569C3C010800FF
++:100AF000AC2456A03C010800AC2556A43C010800DB
++:100B0000AC2256A83C010800AC23563C3C0108002E
++:100B1000AC2456443C010800AC2056603C0108005F
++:100B2000AC2556643C010800AC2056703C0108001E
++:100B3000AC27567C3C010800AC2656903C010800CE
++:100B4000AC2356980E00056E00000000AF80000C2C
++:100B50003C0280008C5300008F8300043C0208009C
++:100B60008C420020106200213262000700008821C0
++:100B70002792FF9C3C100800261056E43C02080017
++:100B80008C42002024050001022518040043202483
++:100B90008F820004004310245044000C26310001D1
++:100BA00010800008AF9000248E4300003C028000BB
++:100BB000AC4300900E000D4BAE05000C0A0002C1C4
++:100BC00026310001AE00000C263100012E22000269
++:100BD000261000381440FFE9265200043C020800A9
++:100BE0008C420020AF820004326200071040FFD91F
++:100BF0003C028000326200011040002D326200028F
++:100C00003C0580008CA2010000002021ACA2002045
++:100C10008CA301042C42078110400008ACA300A85B
++:100C200094A2010824032000304270001443000302
++:100C30003C02800890420005304400FF0E0001593C
++:100C4000000000003C0280009042010B304300FF96
++:100C50002C62001E54400004000310800E00018628
++:100C60000A0002EC00000000005410218C42000039
++:100C70000040F80900000000104000043C02800021
++:100C80008C4301043C026020AC4300143C02080089
++:100C90008C4200343C0440003C03800024420001AC
++:100CA000AC6401383C010800AC220034326200021E
++:100CB00010400010326200043C1080008E0201409F
++:100CC000000020210E000159AE0200200E00038317
++:100CD000000000003C024000AE0201783C02080027
++:100CE0008C420038244200013C010800AC2200384C
++:100CF000326200041040FF973C0280003C108000EC
++:100D00008E020180000020210E000159AE02002059
++:100D10008E03018024020F00546200073C02800809
++:100D20008E0201883C0300E03042FFFF00431025A3
++:100D30000A000328AE020080344200809042000086
++:100D400024030050304200FF14430007000000005D
++:100D50000E000362000000001440000300000000C9
++:100D60000E000971000000003C0208008C42003CAB
++:100D70003C0440003C03800024420001AC6401B804
++:100D80003C010800AC22003C0A0002A33C028000A7
++:100D90003C02900034420001008220253C02800089
++:100DA000AC4400203C0380008C6200200440FFFE25
++:100DB0000000000003E00008000000003C0280008A
++:100DC000344300010083202503E00008AC440020E8
++:100DD00027BDFFE0AFB10014AFB000100080882144
++:100DE000AFBF00180E00033230B000FF8F83FF94B6
++:100DF000022020219062002502028025A07000259B
++:100E00008C7000183C0280000E00033D020280241A
++:100E10001600000B8FBF00183C0480008C8201F884
++:100E20000440FFFE348201C024030002AC510000E4
++:100E3000A04300043C021000AC8201F88FBF0018F0
++:100E40008FB100148FB0001003E0000827BD002010
++:100E500027BDFFE83C028000AFBF00103442018094
++:100E6000944300048C4400083063020010600005C5
++:100E7000000028210E00100C000000000A0003787A
++:100E8000240500013C02FF000480000700821824B2
++:100E90003C02040014620004240500018F82FF94C8
++:100EA00090420008240500018FBF001000A010210F
++:100EB00003E0000827BD00188F82FF982405000179
++:100EC000A040001A3C028000344201400A00034264
++:100ED0008C4400008F85FF9427BDFFE0AFBF001C4E
++:100EE000AFB20018AFB10014AFB0001090A2000074
++:100EF000304400FF38830020388200300003182B74
++:100F00000002102B0062182410600003240200501D
++:100F1000148200A88FBF001C90A20005304200017F
++:100F2000104000A48FBF001C3C02800034420140EE
++:100F3000904200082443FFFF2C6200051040009EF1
++:100F40008FB20018000310803C030800246355ACE6
++:100F5000004310218C420000004000080000000007
++:100F60003C028000345101400E0003328E24000008
++:100F70008F92FF948E2200048E50000C1602000205
++:100F800024020001AE42000C0E00033D8E2400003E
++:100F90008E220004145000068FBF001C8FB2001870
++:100FA0008FB100148FB000100A000F7827BD002009
++:100FB0008E42000C0A000419000000003C0480006E
++:100FC0003482014094A300108C4200043063FFFF80
++:100FD0001443001C0000000024020001A4A2001021
++:100FE0008C8202380441000F3C0380003C02003F29
++:100FF0003448F0003C0760003C06FFC08CE22BBC8C
++:1010000000461824004810240002130200031D8229
++:10101000106200583C0280008C8202380440FFF7C6
++:101020003C038000346201408C44000034620200C2
++:10103000AC4400003C021000AC6202380A00043BE1
++:101040008FBF001C94A200100A00041900000000C9
++:10105000240200201482000F3C0280003C03800028
++:1010600094A20012346301408C6300043042FFFFFD
++:10107000146200050000000024020001A4A2001276
++:101080000A0004028FBF001C94A200120A00041977
++:1010900000000000345101400E0003328E24000095
++:1010A0008F92FF948E230004964200123050FFFF6F
++:1010B0001603000224020001A64200120E00033DA6
++:1010C0008E2400008E220004160200068FBF001C32
++:1010D0008FB200188FB100148FB000100A00037C8B
++:1010E00027BD0020964200120A00041900000000EB
++:1010F0003C03800094A20014346301408C6300041C
++:101100003042FFFF14620008240200018FBF001C60
++:101110008FB200188FB100148FB00010A4A2001479
++:101120000A00146327BD002094A20014144000217B
++:101130008FBF001C0A000435000000003C03800043
++:1011400094A20016346301408C6300043042FFFF18
++:101150001462000D240200018FBF001C8FB2001822
++:101160008FB100148FB00010A4A200160A000B1457
++:1011700027BD00209442007824420004A4A200105D
++:101180000A00043B8FBF001C94A200162403000138
++:101190003042FFFF144300078FBF001C3C020800D1
++:1011A0008C420070244200013C010800AC22007017
++:1011B0008FBF001C8FB200188FB100148FB00010C9
++:1011C00003E0000827BD002027BDFFD8AFB20018FC
++:1011D0008F92FF94AFB10014AFBF0020AFB3001CDB
++:1011E000AFB000103C028000345101008C5001006F
++:1011F0009242000092230009304400FF2402001FA5
++:10120000106200AB28620020104000192402003850
++:101210002862000A1040000D2402000B286200081A
++:101220001040002E8F820024046001042862000216
++:101230001440002A8F820024240200061062002637
++:101240008FBF00200A00055F8FB3001C1062006092
++:101250002862000B144000FA8FBF00202402000E09
++:10126000106200788F8200240A00055F8FB3001C93
++:10127000106200D2286200391040000A2402008067
++:1012800024020036106200E528620037104000C3D7
++:1012900024020035106200D98FBF00200A00055FCC
++:1012A0008FB3001C1062002D2862008110400006E0
++:1012B000240200C824020039106200C98FBF002038
++:1012C0000A00055F8FB3001C106200A28FBF0020D0
++:1012D0000A00055F8FB3001C8F8200248C42000C33
++:1012E000104000D78FBF00200E000D8400000000CA
++:1012F0003C038000346301008C6200008F85002075
++:10130000946700089466000CACA200008C64000492
++:101310008F82002400063400ACA400049448001E10
++:101320008C62001800073C0000E83825ACA20008D9
++:101330008C62001C24040001ACA2000C9062000A24
++:1013400000C23025ACA60010ACA00014ACA0001860
++:10135000ACA7001C0A00051D8FBF00208F8200244F
++:101360008C42000C104000B68FBF00200E000D8490
++:10137000000000008F820024962400089625000CAF
++:101380009443001E000422029626000E8F82002045
++:10139000000426000083202500052C003C0300806B
++:1013A00000A6282500832025AC400000AC400004A6
++:1013B000AC400008AC40000CAC450010AC40001440
++:1013C000AC400018AC44001C0A00051C24040001B9
++:1013D0009622000C14400018000000009242000504
++:1013E0003042001014400014000000000E000332D0
++:1013F0000200202192420005020020213442001008
++:101400000E00033DA242000592420000240300208A
++:10141000304200FF10430089020020218FBF0020CE
++:101420008FB3001C8FB200188FB100148FB0001062
++:101430000A00107527BD00280000000D0A00055E97
++:101440008FBF00208C42000C1040007D8FBF002019
++:101450000E000D84000000008E2200048F84002006
++:101460009623000CAC8200003C0280089445002CBE
++:101470008F82002400031C0030A5FFFF9446001E4D
++:101480003C02400E0065182500C23025AC830004E4
++:10149000AC800008AC80000CAC800010AC80001464
++:1014A000AC800018AC86001C0A00051C2404000156
++:1014B0000E000332020020218F93FF9802002021AA
++:1014C0000E00033DA660000C020020210E00034226
++:1014D000240500018F8200248C42000C104000582B
++:1014E0008FBF00200E000D84000000009622000C2B
++:1014F0008F83002000021400AC700000AC62000476
++:10150000AC6000088E4400388F820024AC64000C6C
++:101510008E46003C9445001E3C02401FAC66001005
++:1015200000A228258E62000424040001AC6200148D
++:10153000AC600018AC65001C8FBF00208FB3001C8E
++:101540008FB200188FB100148FB000100A000DB8D0
++:1015500027BD0028240200201082003A8FB3001C0F
++:101560000E000F5E00000000104000358FBF00200D
++:101570003C0480008C8201F80440FFFE348201C0EC
++:1015800024030002AC500000A04300043C02100001
++:10159000AC8201F80A00055E8FBF00200200202106
++:1015A0008FBF00208FB3001C8FB200188FB10014C2
++:1015B0008FB000100A000EA727BD00289625000C4A
++:1015C000020020218FBF00208FB3001C8FB20018B3
++:1015D0008FB100148FB000100A000ECC27BD002878
++:1015E000020020218FB3001C8FB200188FB10014AD
++:1015F0008FB000100A000EF727BD00289225000DBD
++:10160000020020218FB3001C8FB200188FB100148C
++:101610008FB000100A000F4827BD002802002021CB
++:101620008FBF00208FB3001C8FB200188FB1001441
++:101630008FB000100A000F1F27BD00288FBF0020A9
++:101640008FB3001C8FB200188FB100148FB0001040
++:1016500003E0000827BD00283C0580008CA202782A
++:101660000440FFFE34A2024024030002AC44000008
++:10167000A04300043C02100003E00008ACA2027882
++:10168000A380001803E00008A38000193C03800039
++:101690008C6202780440FFFE8F82001CAC62024024
++:1016A00024020002A06202443C02100003E0000891
++:1016B000AC6202783C02600003E000088C425404F3
++:1016C0009083003024020005008040213063003FF9
++:1016D0000000482114620005000050219082004C57
++:1016E0009483004E304900FF306AFFFFAD00000CCC
++:1016F000AD000010AD000024950200148D05001C03
++:101700008D0400183042FFFF004910230002110031
++:10171000000237C3004038210086202300A2102B8E
++:101720000082202300A72823AD05001CAD0400186B
++:10173000A5090014A5090020A50A001603E0000869
++:10174000A50A002203E000080000000027BDFFD822
++:10175000AFB200183C128008AFB40020AFB3001C39
++:10176000AFB10014AFBF0024AFB00010365101007C
++:101770003C0260008C4254049222000C3C1408008D
++:10178000929400F7304300FF2402000110620032FF
++:101790000080982124020002146200353650008037
++:1017A0000E00143D000000009202004C2403FF8054
++:1017B0003C0480003042007F000211C024420240FD
++:1017C0000262102100431824AC8300949245000863
++:1017D0009204004C3042007F3C03800614850007D1
++:1017E000004380212402FFFFA22200112402FFFFF8
++:1017F000A62200120A0005D22402FFFF9602002052
++:10180000A222001196020022A62200128E020024BB
++:101810003C048008AE2200143485008090A2004C65
++:1018200034830100A06200108CA2003CAC6200185E
++:101830008C820068AC6200F48C820064AC6200F0C0
++:101840008C82006CAC6200F824020001A0A2006847
++:101850000A0005EE3C0480080E001456000000004B
++:1018600036420080A04000680A0005EE3C04800873
++:10187000A2000068A20000690A0006293C02800854
++:10188000348300808C62003834850100AC62006CC7
++:1018900024020001A062006990A200D59083000894
++:1018A000305100FF3072007F12320019001111C058
++:1018B00024420240026210212403FF8000431824C6
++:1018C0003C048000AC8300943042007F3C038006DF
++:1018D000004380218E02000C1040000D02002021E8
++:1018E0000E00057E0000000026220001305100FF9E
++:1018F0009203003C023410260002102B0002102339
++:101900003063007F022288240A0005F8A203003C0D
++:101910003C088008350401008C8200E03507008017
++:10192000ACE2003C8C8200E0AD02000090E5004C8F
++:10193000908600D590E3004C908400D52402FF806F
++:1019400000A228243063007F308400FF00A62825F1
++:101950000064182A1060000230A500FF38A500803E
++:10196000A0E5004CA10500093C0280089043000E50
++:10197000344400803C058000A043000A8C8300189A
++:101980003C027FFF3442FFFF00621824AC83001842
++:101990008CA201F80440FFFE00000000ACB301C0BF
++:1019A0008FBF00248FB400208FB3001C8FB20018AB
++:1019B0008FB100148FB0001024020002A0A201C455
++:1019C00027BD00283C02100003E00008ACA201F88B
++:1019D00090A2000024420001A0A200003C030800E5
++:1019E0008C6300F4304200FF144300020080302179
++:1019F000A0A0000090A200008F84001C000211C073
++:101A00002442024024830040008220212402FF80DF
++:101A1000008220243063007F3C02800A006218218B
++:101A20003C028000AC44002403E00008ACC300008A
++:101A300094820006908300058C85000C8C86001033
++:101A40008C8700188C88001C8C8400203C010800C6
++:101A5000A42256C63C010800A02356C53C0108003C
++:101A6000AC2556CC3C010800AC2656D03C01080001
++:101A7000AC2756D83C010800AC2856DC3C010800D5
++:101A8000AC2456E003E00008000000003C0280089F
++:101A9000344201008C4400343C038000346504006F
++:101AA000AC6400388C420038AF850028AC62003C42
++:101AB0003C020005AC6200300000000000000000A5
++:101AC00003E00008000000003C020006308400FF34
++:101AD000008220253C028000AC4400300000000061
++:101AE00000000000000000003C0380008C62000049
++:101AF000304200101040FFFD3462040003E0000893
++:101B0000AF82002894C200003C080800950800CA73
++:101B100030E7FFFF0080482101021021A4C200002D
++:101B200094C200003042FFFF00E2102B544000013D
++:101B3000A4C7000094A200003C0308008C6300CC02
++:101B400024420001A4A2000094A200003042FFFF42
++:101B5000144300073C0280080107102BA4A00000DA
++:101B60005440000101003821A4C700003C02800855
++:101B7000344601008CC3002894A200003C0480007D
++:101B80003042FFFE000210C000621021AC82003C17
++:101B90008C82003C006218231860000400000000E2
++:101BA0008CC200240A0006BA244200018CC2002420
++:101BB000AC8200383C020050344200103C038000EC
++:101BC000AC620030000000000000000000000000D7
++:101BD0008C620000304200201040FFFD0000000039
++:101BE00094A200003C04800030420001000210C0BA
++:101BF000004410218C430400AD2300008C420404F7
++:101C0000AD2200043C02002003E00008AC8200305A
++:101C100027BDFFE0AFB20018AFB10014AFB00010A5
++:101C2000AFBF001C94C2000000C080213C1208001D
++:101C3000965200C624420001A6020000960300004E
++:101C400094E2000000E03021144300058FB1003021
++:101C50000E00068F024038210A0006F10000000045
++:101C60008C8300048C82000424420040046100073D
++:101C7000AC8200048C8200040440000400000000D8
++:101C80008C82000024420001AC8200009602000019
++:101C90003042FFFF50520001A600000096220000D3
++:101CA00024420001A62200003C02800834420100C8
++:101CB000962300009442003C144300048FBF001C94
++:101CC00024020001A62200008FBF001C8FB2001862
++:101CD0008FB100148FB0001003E0000827BD002072
++:101CE00027BDFFE03C028008AFBF0018344201006E
++:101CF0008C4800343C03800034690400AC68003830
++:101D00008C42003830E700FFAF890028AC62003C0D
++:101D10003C020005AC620030000000000000000042
++:101D200000000000000000000000000000000000B3
++:101D30008C82000C8C82000C97830016AD22000070
++:101D40008C82001000604021AD2200048C820018BB
++:101D5000AD2200088C82001CAD22000C8CA2001465
++:101D6000AD2200108C820020AD220014908200056C
++:101D7000304200FF00021200AD2200188CA20018B1
++:101D8000AD22001C8CA2000CAD2200208CA2001001
++:101D9000AD2200248CA2001CAD2200288CA20020C1
++:101DA000AD22002C3402FFFFAD260030AD20003400
++:101DB000506200013408FFFFAD28003850E00011E8
++:101DC0003C0280083C048008348401009482005066
++:101DD0003042FFFFAD22003C9483004494850044D0
++:101DE000240200013063FFFF000318C200641821C1
++:101DF0009064006430A5000700A210040A00075C8C
++:101E00000044102534420100AD20003C94430044BE
++:101E1000944400443063FFFF000318C2006218219D
++:101E200030840007906500642402000100821004E1
++:101E30000002102700451024A0620064000000008A
++:101E400000000000000000003C0200063442004098
++:101E50003C038000AC620030000000000000000085
++:101E6000000000008C620000304200101040FFFDB6
++:101E70003C06800834C201503463040034C7014A70
++:101E800034C4013434C5014034C60144AFA200104B
++:101E90000E0006D2AF8300288FBF001803E00008B1
++:101EA00027BD00208F8300143C0608008CC600E884
++:101EB0008F82001C30633FFF000319800046102111
++:101EC000004310212403FF80004318243C068000B7
++:101ED000ACC300283042007F3C03800C004330211B
++:101EE00090C2000D30A500FF0000382134420010E0
++:101EF000A0C2000D8F8900143C028008344201000A
++:101F00009443004400091382304800032402000176
++:101F1000A4C3000E1102000B2902000210400005AC
++:101F2000240200021100000C240300010A0007A48F
++:101F30000000182111020006000000000A0007A49A
++:101F4000000018218CC2002C0A0007A424430001C1
++:101F50008CC20014244300018CC200180043102BD3
++:101F60005040000A240700012402002714A20003A5
++:101F70003C0380080A0007B1240700013463010014
++:101F80009462004C24420001A462004C00091382B8
++:101F9000304300032C620002104000090080282119
++:101FA000146000040000000094C200340A0007C15D
++:101FB0003046FFFF8CC600380A0007C10080282188
++:101FC000000030213C040800248456C00A000706A3
++:101FD0000000000027BDFF90AFB60068AFB50064F9
++:101FE000AFB40060AFB3005CAFB20058AFB1005403
++:101FF000AFBF006CAFB000508C9000000080B021EB
++:102000003C0208008C4200E8960400328F83001CDA
++:102010002414FF8030843FFF0062182100042180D7
++:1020200000641821007410243C13800000A090214B
++:1020300090A50000AE620028920400323C02800CA1
++:102040003063007F00628821308400C02402004099
++:10205000148200320000A8218E3500388E2200182C
++:102060001440000224020001AE2200189202003C3B
++:10207000304200201440000E8F83001C000511C068
++:102080002442024000621821306400783C02008043
++:102090000082202500741824AE630800AE64081086
++:1020A0008E2200188E03000800431021AE22001873
++:1020B0008E22002C8E230018244200010062182B6F
++:1020C0001060004300000000924200002442000122
++:1020D000A24200003C0308008C6300F4304200FF81
++:1020E00050430001A2400000924200008F84001C77
++:1020F000000211C024420240248300403063007F6C
++:10210000008220213C02800A0094202400621821D1
++:10211000AE6400240A0008D2AEC30000920300326D
++:102120002402FFC000431024304200FF1440000589
++:1021300024020001AE220018962200340A00084250
++:102140003055FFFF8E22001424420001AE220018F9
++:102150009202003000021600000216030441001C27
++:10216000000000009602003227A400100080282101
++:10217000A7A20016960200320000302124070001B9
++:102180003042FFFFAF8200140E000706AFA0001C14
++:10219000960200328F83001C3C0408008C8400E807
++:1021A00030423FFF000211800064182100621821B4
++:1021B00000741024AE62002C3063007F3C02800E5D
++:1021C000006218219062000D3042007FA062000D75
++:1021D0009222000D304200105040007892420000E0
++:1021E0003C028008344401009482004C8EC30000FD
++:1021F0003C130800967300C62442FFFFA482004CE3
++:10220000946200329623000E3054FFFF3070FFFFBF
++:102210003C0308008C6300D000701807A7A30038A7
++:102220009482003E3063FFFF3042FFFF14620007DC
++:10223000000000008C8200303C038000244200300B
++:10224000AC62003C0A00086A8C82002C9482004038
++:102250003042FFFF5462000927A400408C820038FE
++:102260003C03800024420030AC62003C8C8200348D
++:10227000AC6200380A0008793C03800027A50038CA
++:1022800027A60048026038210E00068FA7A000484C
++:102290008FA300403C02800024630030AC43003830
++:1022A0008FA30044AC43003C3C0380003C0200058B
++:1022B000AC6200303C028008344401009482004249
++:1022C000346304003042FFFF0202102B1440000769
++:1022D000AF8300289482004E9483004202021021B2
++:1022E000004310230A00088F3043FFFF9483004E01
++:1022F00094820042026318210050102300621823C8
++:102300003063FFFF3C028008344401009482003CAB
++:102310003042FFFF14430003000000000A00089F42
++:10232000240300019482003C3042FFFF0062102B26
++:10233000144000058F8200289482003C0062102324
++:102340003043FFFF8F820028AC550000AC400004F2
++:10235000AC540008AC43000C3C02000634420010B0
++:102360003C038000AC620030000000000000000070
++:10237000000000008C620000304200101040FFFDA1
++:102380003C04800834840100001018C20064182145
++:102390009065006432020007240600010046100424
++:1023A00000451025A0620064948300429622000E2E
++:1023B00050430001A386001892420000244200010D
++:1023C000A24200003C0308008C6300F4304200FF8E
++:1023D00050430001A2400000924200008F84001C84
++:1023E000000211C0244202402483004000822021C8
++:1023F0002402FF80008220243063007F3C02800A98
++:10240000006218213C028000AC440024AEC30000EE
++:102410008FBF006C8FB600688FB500648FB400600A
++:102420008FB3005C8FB200588FB100548FB0005052
++:1024300003E0000827BD007027BDFFD8AFB3001C24
++:10244000AFB20018AFB10014AFB00010AFBF0020A2
++:102450000080982100E0802130B1FFFF0E000D8444
++:1024600030D200FF0000000000000000000000006B
++:102470008F8200208F830024AC510000AC520004F6
++:10248000AC530008AC40000CAC400010AC40001451
++:10249000AC4000189463001E02038025AC50001C61
++:1024A0000000000000000000000000002404000103
++:1024B0008FBF00208FB3001C8FB200188FB10014A3
++:1024C0008FB000100A000DB827BD002830A5FFFF0F
++:1024D0000A0008DC30C600FF3C02800834430100DB
++:1024E0009462000E3C080800950800C63046FFFFC5
++:1024F00014C000043402FFFF946500EA0A000929B1
++:102500008F84001C10C20027000000009462004E5F
++:102510009464003C3045FFFF00A6102300A6182B52
++:102520003087FFFF106000043044FFFF00C5102318
++:1025300000E210233044FFFF0088102B1040000EF3
++:1025400000E810233C028008344401002403000109
++:1025500034420080A44300162402FFFFA482000E30
++:10256000948500EA8F84001C0000302130A5FFFF15
++:102570000A0009013C0760200044102A10400009AD
++:102580003C0280083443008094620016304200010F
++:10259000104000043C0280009442007E244200145B
++:1025A000A462001603E000080000000027BDFFE061
++:1025B0003C028008AFBF001CAFB0001834420100DD
++:1025C000944300429442004C104000193068FFFFD1
++:1025D0009383001824020001146200298FBF001C9D
++:1025E0003C06800834D00100000810C200501021C1
++:1025F000904200643103000734C70148304200FFB5
++:10260000006210073042000134C9014E34C4012C6D
++:1026100034C5013E1040001634C601420E0006D2F9
++:10262000AFA90010960200420A0009463048FFFF99
++:102630003C028008344401009483004494820042A8
++:102640001043000F8FBF001C94820044A4820042FC
++:1026500094820050A482004E8C820038AC820030FC
++:1026600094820040A482003E9482004AA4820048E2
++:102670008FBF001C8FB000180A00090427BD00207E
++:102680008FB0001803E0000827BD002027BDFFA081
++:10269000AFB1004C3C118000AFBF0058AFB3005445
++:1026A000AFB20050AFB000483626018890C2000398
++:1026B0003044007FA3A400108E32018090C200003D
++:1026C0003043007F240200031062003BAF92001CE5
++:1026D00028620004104000062402000424020002C4
++:1026E000106200098FBF00580A000B0F8FB300540F
++:1026F0001062004D240200051062014E8FBF005889
++:102700000A000B0F8FB30054000411C002421021C5
++:102710002404FF8024420240004410242643004049
++:10272000AE2200243063007F3C02800A0062182140
++:102730009062003CAFA3003C00441025A062003C26
++:102740008FA3003C9062003C304200401040016C7E
++:102750008FBF00583C108008A3800018361001007D
++:102760008E0200E08C63003427A4003C27A50010F3
++:10277000004310210E0007C3AE0200E093A2001038
++:102780003C038000A20200D58C6202780440FFFE68
++:102790008F82001CAC62024024020002A06202444C
++:1027A0003C021000AC6202780E0009390000000003
++:1027B0000A000B0E8FBF00583C05800890C3000133
++:1027C00090A2000B1443014E8FBF005834A4008028
++:1027D0008C8200189082004C90A200083C0260009D
++:1027E0008C4254048C8300183C027FFF3442FFFF6C
++:1027F000006218243C0208008C4200B4AC8300182C
++:102800003C038000244200013C010800AC2200B4DB
++:102810008C6201F80440FFFE8F82001CAC6201C094
++:102820000A000AD6240200023C10800890C300016E
++:102830009202000B144301328FBF005827A40018E6
++:1028400036050110240600033C0260008C4254044B
++:102850000E000E470000000027A40028360501F0F6
++:102860000E000E47240600038FA200283603010045
++:10287000AE0200648FA2002CAE0200688FA200306E
++:10288000AE02006C93A40018906300D52402FF8070
++:102890000082102400431025304900FF3084007F5F
++:1028A0003122007F0082102A544000013929008023
++:1028B000000411C0244202402403FF800242102180
++:1028C00000431024AE220094264200403042007F94
++:1028D0003C038006004340218FA3001C2402FFFF1D
++:1028E000AFA800403C130800927300F71062003359
++:1028F00093A2001995030014304400FF3063FFFFDA
++:102900000064182B106000100000000095040014F3
++:102910008D07001C8D0600183084FFFF0044202323
++:102920000004210000E438210000102100E4202BE5
++:1029300000C2302100C43021AD07001CAD060018D4
++:102940000A000A2F93A20019950400148D07001C99
++:102950008D0600183084FFFF008220230004210030
++:10296000000010210080182100C2302300E4202B39
++:1029700000C4302300E33823AD07001CAD06001867
++:1029800093A200198FA30040A462001497A2001A1A
++:10299000A46200168FA2001CAC6200108FA2001C63
++:1029A000AC62000C93A20019A462002097A2001A46
++:1029B000A46200228FA2001CAC6200243C048008A8
++:1029C000348300808C6200388FA20020012088218F
++:1029D000AC62003C8FA20020AC82000093A20018E1
++:1029E000A062004C93A20018A0820009A0600068B9
++:1029F00093A20018105100512407FF803229007F54
++:102A0000000911C024420240024210213046007FDA
++:102A10003C03800000471024AC6200943C02800616
++:102A200000C2302190C2003CAFA60040000020212F
++:102A300000471025A0C2003C8FA80040950200026C
++:102A4000950300148D07001C3042FFFF3063FFFF29
++:102A50008D060018004310230002110000E2382107
++:102A600000E2102B00C4302100C23021AD07001C51
++:102A7000AD06001895020002A5020014A50000167C
++:102A80008D020008AD0200108D020008AD02000C9E
++:102A900095020002A5020020A50000228D02000878
++:102AA000AD0200249102003C304200401040001A68
++:102AB000262200013C108008A3A90038A38000183A
++:102AC000361001008E0200E08D03003427A4004080
++:102AD00027A50038004310210E0007C3AE0200E016
++:102AE00093A200383C038000A20200D58C620278D9
++:102AF0000440FFFE8F82001CAC62024024020002F0
++:102B0000A06202443C021000AC6202780E00093957
++:102B100000000000262200013043007F14730004EF
++:102B2000004020212403FF8002231024004320269C
++:102B300093A200180A000A4B309100FF93A40018DA
++:102B40008FA3001C2402FFFF1062000A308900FFDF
++:102B500024820001248300013042007F14530005C9
++:102B6000306900FF2403FF800083102400431026F7
++:102B7000304900FF3C028008904200080120882173
++:102B8000305000FF123000193222007F000211C0C5
++:102B900002421021244202402403FF8000431824F3
++:102BA0003C048000AC8300943042007F3C038006EC
++:102BB000004310218C43000C004020211060000BCA
++:102BC000AFA200400E00057E000000002623000199
++:102BD0002405FF803062007F145300020225202468
++:102BE000008518260A000AAF307100FF3C048008F7
++:102BF000348400808C8300183C027FFF3442FFFF46
++:102C000000621824AC8300183C0380008C6201F839
++:102C10000440FFFE00000000AC7201C0240200026C
++:102C2000A06201C43C021000AC6201F80A000B0E65
++:102C30008FBF00583C04800890C300019082000BB5
++:102C40001443002F8FBF0058349000809202000878
++:102C500030420040104000200000000092020008B6
++:102C60000002160000021603044100050240202164
++:102C70000E000ECC240500930A000B0E8FBF0058E7
++:102C80009202000924030018304200FF1443000D93
++:102C900002402021240500390E000E64000030217E
++:102CA0000E0003328F84001C8F82FF9424030012D5
++:102CB000A04300090E00033D8F84001C0A000B0E88
++:102CC0008FBF0058240500360E000E64000030212E
++:102CD0000A000B0E8FBF00580E0003320240202165
++:102CE000920200058F84001C344200200E00033D38
++:102CF000A20200050E0010758F84001C8FBF0058C3
++:102D00008FB300548FB200508FB1004C8FB0004889
++:102D100003E0000827BD00603C0280083445010044
++:102D20003C0280008C42014094A3000E0000302140
++:102D300000402021AF82001C3063FFFF3402FFFF00
++:102D4000106200063C0760202402FFFFA4A2000ED0
++:102D500094A500EA0A00090130A5FFFF03E000087E
++:102D60000000000027BDFFC83C0280003C06800830
++:102D7000AFB5002CAFB1001CAFBF0030AFB400281E
++:102D8000AFB30024AFB20020AFB00018345101003F
++:102D900034C501008C4301008E2200148CA400E491
++:102DA0000000A821AF83001C0044102318400052EB
++:102DB000A38000188E22001400005021ACA200E471
++:102DC00090C3000890A200D53073007FA3A200102A
++:102DD0008CB200E08CB400E4304200FF1053003BA2
++:102DE00093A200108F83001C2407FF80000211C0F3
++:102DF0000062102124420240246300400047102456
++:102E00003063007F3C0980003C08800A006818217C
++:102E1000AD2200248C62003427A4001427A50010E2
++:102E2000024280210290102304400028AFA3001426
++:102E30009062003C00E21024304200FF1440001970
++:102E4000020090219062003C34420040A062003CAD
++:102E50008F86001C93A3001024C200403042007FE4
++:102E6000004828213C0208008C4200F42463000141
++:102E7000306400FF14820002A3A30010A3A000107E
++:102E800093A20010AFA50014000211C0244202401A
++:102E900000C2102100471024AD2200240A000B4577
++:102EA00093A200100E0007C3000000003C0280083F
++:102EB00034420100AC5000E093A30010240A00014A
++:102EC000A04300D50A000B4593A200102402000184
++:102ED000154200093C0380008C6202780440FFFE2A
++:102EE0008F82001CAC62024024020002A0620244F5
++:102EF0003C021000AC6202789222000B2403000214
++:102F0000304200FF144300720000000096220008C7
++:102F1000304300FF24020082146200402402008437
++:102F20003C028000344901008D22000C95230006EC
++:102F3000000216023063FFFF3045003F24020027E5
++:102F400010A2000FAF83001428A200281040000830
++:102F5000240200312402002110A2000924020025CD
++:102F600010A20007938200190A000BBD00000000A8
++:102F700010A20007938200190A000BBD0000000098
++:102F80000E000777012020210A000C3D0000000000
++:102F90003C0380008C6202780440FFFE8F82001C9C
++:102FA000AC62024024020002A06202443C02100013
++:102FB000AC6202780A000C3D000000009523000678
++:102FC000912400058D25000C8D2600108D270018FA
++:102FD0008D28001C8D290020244200013C0108009E
++:102FE000A42356C63C010800A02456C53C01080095
++:102FF000AC2556CC3C010800AC2656D03C0108005C
++:10300000AC2756D83C010800AC2856DC3C0108002F
++:10301000AC2956E00A000C3DA38200191462000A94
++:10302000240200813C02800834420100944500EAF9
++:10303000922600058F84001C30A5FFFF30C600FFDC
++:103040000A000BFE3C0760211462005C00000000D7
++:103050009222000A304300FF306200201040000737
++:10306000306200403C02800834420100944500EA8E
++:103070008F84001C0A000BFC24060040104000074F
++:10308000000316003C02800834420100944500EA27
++:103090008F84001C0A000BFC24060041000216036A
++:1030A000044100463C02800834420100944500EA95
++:1030B0008F84001C2406004230A5FFFF3C076019E6
++:1030C0000E000901000000000A000C3D0000000095
++:1030D0009222000B24040016304200FF1044000628
++:1030E0003C0680009222000B24030017304200FFB0
++:1030F000144300320000000034C5010090A2000B10
++:10310000304200FF1444000B000080218CA20020FC
++:103110008CA400202403FF800043102400021140EF
++:103120003084007F004410253C032000004310251C
++:10313000ACC2083094A2000800021400000214037C
++:10314000044200012410000194A2000830420080D3
++:103150005040001A0200A82194A20008304220002A
++:10316000504000160200A8218CA300183C021C2D20
++:10317000344219ED106200110200A8213C0208003F
++:103180008C4200D4104000053C0280082403000457
++:1031900034420100A04300FC3C028008344201009C
++:1031A000944500EA8F84001C2406000630A5FFFF2A
++:1031B0000E0009013C0760210200A8210E00093918
++:1031C000000000009222000A304200081040000473
++:1031D00002A010210E0013790000000002A01021AF
++:1031E0008FBF00308FB5002C8FB400288FB3002420
++:1031F0008FB200208FB1001C8FB0001803E00008D0
++:1032000027BD00382402FF80008220243C02900069
++:1032100034420007008220253C028000AC4400209C
++:103220003C0380008C6200200440FFFE0000000090
++:1032300003E00008000000003C0380002402FF803F
++:10324000008220243462000700822025AC64002024
++:103250008C6200200440FFFE0000000003E0000834
++:103260000000000027BDFFD8AFB3001CAFB10014B1
++:10327000AFB00010AFBF0020AFB200183C1180000B
++:103280003C0280088E32002034530100AE2400201E
++:10329000966300EA000514003C074000004738250B
++:1032A00000A08021000030210E0009013065FFFFE1
++:1032B000240200A1160200022402FFFFA2620009FC
++:1032C000AE3200208FBF00208FB3001C8FB20018D9
++:1032D0008FB100148FB0001003E0000827BD002854
++:1032E0003C0280082403000527BDFFE834420100AA
++:1032F000A04300FCAFBF00103C0280008C420100E4
++:10330000240500A1004020210E000C67AF82001CA4
++:103310003C0380008C6202780440FFFE8F82001C18
++:103320008FBF001027BD0018AC62024024020002CB
++:10333000A06202443C021000AC62027803E0000884
++:103340000000000027BDFFE83C068000AFBF001072
++:1033500034C7010094E20008304400FF3883008243
++:10336000388200842C6300012C4200010062182581
++:103370001060002D24020083938200195040003B0E
++:103380008FBF00103C020800904256CC8CC4010054
++:103390003C06080094C656C63045003F38A30032AC
++:1033A00038A2003F2C6300012C4200010062182566
++:1033B000AF84001CAF860014A380001914600007BE
++:1033C00000E020212402002014A2001200000000CE
++:1033D0003402FFFF14C2000F00000000240200208E
++:1033E00014A2000500E028218CE300142402FFFF52
++:1033F0005062000B8FBF00103C040800248456C0AC
++:10340000000030210E000706240700010A000CD638
++:103410008FBF00100E000777000000008FBF001064
++:103420000A00093927BD001814820004240200850F
++:103430008CC501040A000CE1000020211482000662
++:103440002482FF808CC50104240440008FBF00103B
++:103450000A00016727BD0018304200FF2C4200021D
++:1034600010400004240200228FBF00100A000B2726
++:1034700027BD0018148200048F8200248FBF001023
++:103480000A000C8627BD00188C42000C1040001E5C
++:1034900000E0282190E300092402001814620003D0
++:1034A000240200160A000CFC240300081462000722
++:1034B00024020017240300123C02800834420080DA
++:1034C000A04300090A000D0994A7000854620007F0
++:1034D00094A700088F82FF942404FFFE9043000508
++:1034E00000641824A043000594A7000890A6001BC0
++:1034F0008CA4000094A500068FBF001000073C00BC
++:103500000A0008DC27BD00188FBF001003E0000888
++:1035100027BD00188F8500243C04800094A2002A57
++:103520008CA30034000230C02402FFF000C210243B
++:1035300000621821AC83003C8CA200303C03800068
++:10354000AC8200383C02005034420010AC620030C3
++:103550000000000000000000000000008C6200007D
++:10356000304200201040FFFD30C20008104000062D
++:103570003C0280008C620408ACA200208C62040C27
++:103580000A000D34ACA200248C430400ACA300203C
++:103590008C420404ACA200243C0300203C028000C6
++:1035A000AC4300303C0480008C8200300043102487
++:1035B0001440FFFD8F8600243C020040AC820030A6
++:1035C00094C3002A94C2002894C4002C94C5002EF1
++:1035D00024630001004410213064FFFFA4C20028CE
++:1035E00014850002A4C3002AA4C0002A03E0000836
++:1035F000000000008F84002427BDFFE83C05800404
++:1036000024840010AFBF00100E000E472406000AED
++:103610008F840024948200129483002E3042000F85
++:10362000244200030043180424027FFF0043102BB0
++:1036300010400002AC8300000000000D0E000D13CE
++:10364000000000008F8300248FBF001027BD0018EA
++:10365000946200149463001A3042000F00021500B7
++:10366000006218253C02800003E00008AC4300A083
++:103670008F8300243C028004944400069462001A64
++:103680008C650000A4640016004410233042FFFF44
++:103690000045102B03E00008384200018F8400240D
++:1036A0003C0780049486001A8C85000094E2000692
++:1036B000A482001694E3000600C310233042FFFFEB
++:1036C0000045102B384200011440FFF8A483001677
++:1036D00003E00008000000008F8400243C02800406
++:1036E000944200069483001A8C850000A482001680
++:1036F000006210233042FFFF0045102B38420001CA
++:103700005040000D8F850024006030213C0780046C
++:1037100094E20006A482001694E3000600C310237E
++:103720003042FFFF0045102B384200011440FFF8E3
++:10373000A48300168F8500243C03800034620400BB
++:103740008CA40020AF820020AC6400388CA200243E
++:10375000AC62003C3C020005AC62003003E00008B3
++:10376000ACA000048F8400243C0300068C8200047B
++:1037700000021140004310253C038000AC62003081
++:103780000000000000000000000000008C6200004B
++:10379000304200101040FFFD34620400AC80000491
++:1037A00003E00008AF8200208F86002427BDFFE0E1
++:1037B000AFB10014AFB00010AFBF00188CC300044D
++:1037C0008CC500248F820020309000FF94C4001A22
++:1037D00024630001244200202484000124A7002047
++:1037E000ACC30004AF820020A4C4001AACC70024FC
++:1037F00004A100060000882104E2000594C2001A1A
++:103800008CC2002024420001ACC2002094C2001AE5
++:1038100094C300282E040001004310262C4200010E
++:10382000004410245040000594C2001A24020001F4
++:10383000ACC2000894C2001A94C300280010202BC8
++:10384000004310262C4200010044102514400007BC
++:10385000000000008CC20008144000042402001084
++:103860008CC300041462000F8F8500240E000DA786
++:10387000241100018F820024944300289442001AEE
++:1038800014430003000000000E000D1300000000B0
++:10389000160000048F8500240E000D840000000037
++:1038A0008F85002494A2001E94A4001C24420001D1
++:1038B0003043FFFF14640002A4A2001EA4A0001E57
++:1038C0001200000A3C02800494A2001494A3001A7F
++:1038D0003042000F00021500006218253C028000F3
++:1038E000AC4300A00A000E1EACA0000894420006E3
++:1038F00094A3001A8CA40000A4A200160062102356
++:103900003042FFFF0044102B384200011040000DF0
++:1039100002201021006030213C07800494E2000660
++:10392000A4A2001694E3000600C310233042FFFF58
++:103930000044102B384200011440FFF8A4A30016E5
++:10394000022010218FBF00188FB100148FB000101B
++:1039500003E0000827BD002003E00008000000008D
++:103960008F82002C3C03000600021140004310250A
++:103970003C038000AC62003000000000000000004A
++:10398000000000008C620000304200101040FFFD7B
++:1039900034620400AF82002803E00008AF80002CEE
++:1039A00003E000080000102103E000080000000010
++:1039B0003084FFFF30A5FFFF0000182110800007B2
++:1039C000000000003082000110400002000420428C
++:1039D000006518210A000E3D0005284003E000089C
++:1039E0000060102110C0000624C6FFFF8CA200005A
++:1039F00024A50004AC8200000A000E4724840004C1
++:103A000003E000080000000010A0000824A3FFFF4E
++:103A1000AC86000000000000000000002402FFFF50
++:103A20002463FFFF1462FFFA2484000403E000080B
++:103A3000000000003C0280083442008024030001A2
++:103A4000AC43000CA4430010A4430012A443001490
++:103A500003E00008A44300168F82002427BDFFD88E
++:103A6000AFB3001CAFB20018AFB10014AFB000107C
++:103A7000AFBF00208C47000C248200802409FF8007
++:103A80003C08800E3043007F008080213C0A80008B
++:103A9000004920240068182130B100FF30D200FF17
++:103AA00010E000290000982126020100AD44002CFE
++:103AB000004928243042007F004820219062000005
++:103AC00024030050304200FF1443000400000000B3
++:103AD000AD45002C948200EA3053FFFF0E000D84A8
++:103AE000000000008F8200248F83002000112C0032
++:103AF0009442001E001224003484000100A22825F4
++:103B00003C02400000A22825AC7000008FBF0020BE
++:103B1000AC6000048FB20018AC7300088FB10014C1
++:103B2000AC60000C8FB3001CAC6400108FB00010B0
++:103B3000AC60001424040001AC60001827BD00280C
++:103B40000A000DB8AC65001C8FBF00208FB3001CAD
++:103B50008FB200188FB100148FB0001003E000087E
++:103B600027BD00283C06800034C201009043000FAE
++:103B7000240200101062000E2865001110A000073A
++:103B800024020012240200082405003A10620006F4
++:103B90000000302103E0000800000000240500358B
++:103BA0001462FFFC000030210A000E6400000000D7
++:103BB0008CC200748F83FF9424420FA003E000089E
++:103BC000AC62000C27BDFFE8AFBF00100E0003423F
++:103BD000240500013C0480088FBF0010240200016E
++:103BE00034830080A462001227BD00182402000163
++:103BF00003E00008A080001A27BDFFE0AFB2001864
++:103C0000AFB10014AFB00010AFBF001C30B2FFFF67
++:103C10000E000332008088213C028008345000806E
++:103C20009202000924030004304200FF1443000CF8
++:103C30003C028008124000082402000A0E000E5BBD
++:103C400000000000920200052403FFFE0043102440
++:103C5000A202000524020012A20200093C02800810
++:103C600034420080022020210E00033DA0400027A6
++:103C700016400003022020210E000EBF00000000AD
++:103C800002202021324600FF8FBF001C8FB2001897
++:103C90008FB100148FB00010240500380A000E64A4
++:103CA00027BD002027BDFFE0AFBF001CAFB200184A
++:103CB000AFB10014AFB000100E00033200808021BD
++:103CC0000E000E5B000000003C02800834450080BE
++:103CD00090A2000924120018305100FF1232000394
++:103CE0000200202124020012A0A2000990A20005D7
++:103CF0002403FFFE004310240E00033DA0A2000594
++:103D00000200202124050020163200070000302187
++:103D10008FBF001C8FB200188FB100148FB000103D
++:103D20000A00034227BD00208FBF001C8FB200187D
++:103D30008FB100148FB00010240500390A000E6402
++:103D400027BD002027BDFFE83C028000AFB0001077
++:103D5000AFBF0014344201009442000C2405003629
++:103D60000080802114400012304600FF0E00033214
++:103D7000000000003C02800834420080240300124E
++:103D8000A043000990430005346300100E000E5B51
++:103D9000A04300050E00033D020020210200202167
++:103DA0000E000342240500200A000F3C0000000022
++:103DB0000E000E64000000000E00033202002021FD
++:103DC0003C0280089043001B2405FF9F0200202135
++:103DD000006518248FBF00148FB00010A043001B93
++:103DE0000A00033D27BD001827BDFFE0AFBF001844
++:103DF000AFB10014AFB0001030B100FF0E000332BD
++:103E0000008080213C02800824030012344200809C
++:103E10000E000E5BA04300090E00033D02002021AE
++:103E200002002021022030218FBF00188FB1001422
++:103E30008FB00010240500350A000E6427BD002055
++:103E40003C0480089083000E9082000A1443000B0B
++:103E5000000028218F82FF942403005024050001D4
++:103E600090420000304200FF1443000400000000B4
++:103E70009082000E24420001A082000E03E00008A0
++:103E800000A010213C0380008C6201F80440FFFE7A
++:103E900024020002AC6401C0A06201C43C02100014
++:103EA00003E00008AC6201F827BDFFE0AFB20018E4
++:103EB0003C128008AFB10014AFBF001CAFB00010BF
++:103EC00036510080922200092403000A304200FF8C
++:103ED0001443003E000000008E4300048E22003890
++:103EE000506200808FBF001C92220000240300500B
++:103EF000304200FF144300253C0280008C42014008
++:103F00008E4300043642010002202821AC43001CED
++:103F10009622005C8E2300383042FFFF00021040E2
++:103F200000621821AE23001C8E4300048E2400384A
++:103F30009622005C006418233042FFFF0003184300
++:103F4000000210400043102A10400006000000004C
++:103F50008E4200048E230038004310230A000FAA6B
++:103F6000000220439622005C3042FFFF0002204006
++:103F70003C0280083443010034420080ACA4002C91
++:103F8000A040002424020001A062000C0E000F5E7D
++:103F900000000000104000538FBF001C3C02800056
++:103FA0008C4401403C0380008C6201F80440FFFE19
++:103FB00024020002AC6401C0A06201C43C021000F3
++:103FC000AC6201F80A0010078FBF001C92220009A2
++:103FD00024030010304200FF144300043C02800020
++:103FE0008C4401400A000FEE0000282192220009B3
++:103FF00024030016304200FF14430006240200147C
++:10400000A22200093C0280008C4401400A001001F9
++:104010008FBF001C8E2200388E23003C00431023EB
++:10402000044100308FBF001C92220027244200016F
++:10403000A2220027922200272C42000414400016DE
++:104040003C1080009222000924030004304200FF4B
++:10405000144300093C0280008C4401408FBF001CC7
++:104060008FB200188FB100148FB000102405009398
++:104070000A000ECC27BD00208C440140240500938B
++:104080008FBF001C8FB200188FB100148FB00010CA
++:104090000A000F4827BD00208E0401400E000332A5
++:1040A000000000008E4200042442FFFFAE420004E4
++:1040B0008E22003C2442FFFFAE22003C0E00033D56
++:1040C0008E0401408E0401408FBF001C8FB2001887
++:1040D0008FB100148FB00010240500040A000342C1
++:1040E00027BD00208FB200188FB100148FB00010D0
++:1040F00003E0000827BD00203C0680008CC2018838
++:104100003C038008346500809063000E00021402B6
++:10411000304400FF306300FF1464000E3C0280084E
++:1041200090A20026304200FF104400098F82FF94C5
++:10413000A0A400262403005090420000304200FF5B
++:1041400014430006000000000A0005A18CC4018091
++:104150003C02800834420080A044002603E00008AE
++:104160000000000027BDFFE030E700FFAFB20018FD
++:10417000AFBF001CAFB10014AFB0001000809021A1
++:1041800014E0000630C600FF000000000000000D33
++:10419000000000000A001060240001163C038008A3
++:1041A0009062000E304200FF14460023346200800B
++:1041B00090420026304200FF1446001F000000001D
++:1041C0009062000F304200FF1446001B0000000008
++:1041D0009062000A304200FF144600038F90FF9463
++:1041E0000000000D8F90FF948F82FF983C1180009B
++:1041F000AE05003CAC450000A066000A0E0003328C
++:104200008E240100A20000240E00033D8E24010034
++:104210003C0380008C6201F80440FFFE240200028F
++:10422000AC7201C0A06201C43C021000AC6201F893
++:104230000A0010618FBF001C000000000000000D8C
++:10424000000000002400013F8FBF001C8FB2001847
++:104250008FB100148FB0001003E0000827BD0020CC
++:104260008F83FF943C0280008C44010034420100A3
++:104270008C65003C9046001B0A00102724070001B3
++:104280003C0280089043000E9042000A0043102632
++:10429000304200FF03E000080002102B27BDFFE0C2
++:1042A0003C028008AFB10014AFB00010AFBF0018DF
++:1042B0003450008092020005240300303042003068
++:1042C00014430085008088218F8200248C42000CDA
++:1042D000104000828FBF00180E000D840000000007
++:1042E0008F860020ACD100009202000892030009E2
++:1042F000304200FF00021200306300FF004310252F
++:10430000ACC200049202004D000216000002160327
++:1043100004410005000000003C0308008C630048D5
++:104320000A00109F3C1080089202000830420040B2
++:10433000144000030000182192020027304300FFC0
++:104340003C108008361100809222004D00031E00B0
++:10435000304200FF0002140000621825ACC30008C0
++:104360008E2400308F820024ACC4000C8E250034D3
++:104370009443001E3C02C00BACC50010006218251F
++:104380008E22003800002021ACC200148E22003C96
++:10439000ACC200180E000DB8ACC3001C8E020004A5
++:1043A0008F8400203C058000AC8200008E2200201B
++:1043B000AC8200048E22001CAC8200088E220058C1
++:1043C0008CA3007400431021AC82000C8E22002CC0
++:1043D000AC8200108E2200408E23004400021400A4
++:1043E00000431025AC8200149222004D240300806B
++:1043F000304200FF1443000400000000AC800018AD
++:104400000A0010E38F8200248E23000C2402000196
++:104410001062000E2402FFFF92220008304200408A
++:104420001440000A2402FFFF8E23000C8CA20074AB
++:10443000006218233C0208000062102414400002AD
++:10444000000028210060282100051043AC820018DC
++:104450008F820024000020219443001E3C02C00CE7
++:10446000006218258F8200200E000DB8AC43001C9E
++:104470003C038008346201008C4200008F850020DC
++:10448000346300808FBF0018ACA20000ACA0000411
++:104490008C6400488F8200248FB10014ACA4000803
++:1044A000ACA0000CACA00010906300059446001E68
++:1044B0003C02400D00031E0000C23025ACA30014D6
++:1044C0008FB00010ACA0001824040001ACA6001CA2
++:1044D0000A000DB827BD00208FBF00188FB100144F
++:1044E0008FB0001003E0000827BD00203C028000D0
++:1044F0009443007C3C02800834460100308400FF75
++:104500003065FFFF2402000524A34650A0C4000C20
++:104510005482000C3065FFFF90C2000D2C42000752
++:104520001040000724A30A0090C3000D24020014C9
++:104530000062100400A210210A00111F3045FFFF85
++:104540003065FFFF3C0280083442008003E0000831
++:10455000A44500143C03800834680080AD05003891
++:10456000346701008CE2001C308400FF00A210239D
++:104570001840000330C600FF24A2FFFCACE2001C80
++:1045800030820001504000083C0380088D02003C4E
++:1045900000A2102304410012240400058C620004D0
++:1045A00010A2000F3C0380088C62000414A2001EBD
++:1045B000000000003C0208008C4200D8304200207D
++:1045C000104000093C0280083462008090630008BB
++:1045D0009042004C144300043C0280082404000470
++:1045E0000A00110900000000344300803442010039
++:1045F000A040000C24020001A462001410C0000AB4
++:104600003C0280008C4401003C0380008C6201F875
++:104610000440FFFE24020002AC6401C0A06201C499
++:104620003C021000AC6201F803E00008000000004A
++:1046300027BDFFE800A61823AFBF00101860008058
++:10464000308800FF3C02800834470080A0E000244E
++:1046500034440100A0E000278C82001C00A210233B
++:1046600004400056000000008CE2003C94E3005C33
++:104670008CE4002C004530233063FFFF00C3182179
++:104680000083202B1080000400E018218CE2002C15
++:104690000A00117800A2102194E2005C3042FFFF72
++:1046A00000C2102100A21021AC62001C3C02800854
++:1046B000344400809482005C8C83001C3042FFFFF5
++:1046C0000002104000A210210043102B10400004F3
++:1046D000000000008C82001C0A00118B3C06800840
++:1046E0009482005C3042FFFF0002104000A21021C3
++:1046F0003C06800834C3010034C70080AC82001C33
++:10470000A060000CACE500388C62001C00A21023F5
++:104710001840000224A2FFFCAC62001C3102000120
++:10472000104000083C0380088CE2003C00A21023EB
++:1047300004410012240400058CC2000410A20010E1
++:104740008FBF00108C62000414A2004F8FBF0010B6
++:104750003C0208008C4200D8304200201040000A81
++:104760003C02800834620080906300089042004C54
++:10477000144300053C028008240400048FBF00108D
++:104780000A00110927BD001834430080344201009B
++:10479000A040000C24020001A46200143C0280002E
++:1047A0008C4401003C0380008C6201F80440FFFE51
++:1047B000240200020A0011D8000000008CE2001C54
++:1047C000004610230043102B54400001ACE5001CB0
++:1047D00094E2005C3042FFFF0062102B144000079F
++:1047E0002402000294E2005C8CE3001C3042FFFFD4
++:1047F00000621821ACE3001C24020002ACE5003882
++:104800000E000F5EA082000C1040001F8FBF001032
++:104810003C0280008C4401003C0380008C6201F863
++:104820000440FFFE24020002AC6401C0A06201C487
++:104830003C021000AC6201F80A0011F08FBF0010BA
++:1048400031020010104000108FBF00103C028008A1
++:10485000344500808CA3001C94A2005C00661823E1
++:104860003042FFFF006218213C023FFF3444FFFF4B
++:104870000083102B544000010080182100C3102138
++:10488000ACA2001C8FBF001003E0000827BD001879
++:1048900027BDFFE800C0402100A63023AFBF0010B5
++:1048A00018C00026308A00FF3C028008344900808E
++:1048B0008D24001C8D23002C008820230064182BDD
++:1048C0001060000F344701008CE2002000461021E8
++:1048D000ACE200208CE200200044102B1440000BBE
++:1048E0003C023FFF8CE2002000441023ACE2002099
++:1048F0009522005C3042FFFF0A0012100082202146
++:10490000ACE00020008620213C023FFF3443FFFF43
++:104910000064102B54400001006020213C028008FC
++:104920003442008000851821AC43001CA0400024C4
++:10493000A04000270A0012623C03800831420010A8
++:10494000104000433C0380083C06800834C40080CB
++:104950008C82003C004810235840003E34660080A2
++:104960009082002424420001A0820024908200242E
++:104970003C0308008C630024304200FF0043102BEE
++:10498000144000688FBF001034C201008C42001C2C
++:1049900000A2102318400063000000008CC3000434
++:1049A0009482005C006818233042FFFF0003184324
++:1049B000000210400043102A1040000500000000D3
++:1049C0008CC20004004810230A0012450002104364
++:1049D0009482005C3042FFFF000210403C068008D9
++:1049E000AC82002C34C5008094A2005C8CA4002C06
++:1049F00094A3005C3042FFFF00021040008220219F
++:104A00003063FFFF0083202101041021ACA2001CB1
++:104A10008CC2000434C60100ACC2001C2402000297
++:104A20000E000F5EA0C2000C1040003E8FBF0010B1
++:104A30003C0280008C4401003C0380008C6201F841
++:104A40000440FFFE240200020A001292000000004F
++:104A500034660080ACC50038346401008C82001CD0
++:104A600000A210231840000224A2FFFCAC82001C0C
++:104A7000314200015040000A3C0380088CC2003CD7
++:104A800000A2102304430014240400058C620004D7
++:104A900014A200033C0380080A00128424040005C9
++:104AA0008C62000414A2001F8FBF00103C0208009B
++:104AB0008C4200D8304200201040000A3C0280089E
++:104AC00034620080906300089042004C144300055B
++:104AD0003C028008240400048FBF00100A00110962
++:104AE00027BD00183443008034420100A040000C70
++:104AF00024020001A46200143C0280008C440100E6
++:104B00003C0380008C6201F80440FFFE2402000296
++:104B1000AC6401C0A06201C43C021000AC6201F8A8
++:104B20008FBF001003E0000827BD001827BDFFE875
++:104B30003C0A8008AFBF0010354900808D22003C40
++:104B400000C04021308400FF004610231840009D23
++:104B500030E700FF354701002402000100A63023A2
++:104B6000A0E0000CA0E0000DA522001418C0002455
++:104B7000308200108D23001C8D22002C0068182329
++:104B80000043102B1040000F000000008CE20020BA
++:104B900000461021ACE200208CE200200043102BE4
++:104BA0001440000B3C023FFF8CE200200043102326
++:104BB000ACE200209522005C3042FFFF0A0012C1E7
++:104BC00000621821ACE00020006618213C023FFF83
++:104BD0003446FFFF00C3102B5440000100C01821D1
++:104BE0003C0280083442008000651821AC43001C60
++:104BF000A0400024A04000270A00130F3C038008B7
++:104C0000104000403C0380088D22003C00481023E7
++:104C10005840003D34670080912200242442000166
++:104C2000A1220024912200243C0308008C6300246C
++:104C3000304200FF0043102B1440009A8FBF001039
++:104C40008CE2001C00A21023184000960000000017
++:104C50008D4300049522005C006818233042FFFF5A
++:104C600000031843000210400043102A10400005C2
++:104C7000012020218D420004004810230A0012F276
++:104C8000000210439522005C3042FFFF00021040FA
++:104C90003C068008AC82002C34C5008094A2005CE5
++:104CA0008CA4002C94A3005C3042FFFF0002104053
++:104CB000008220213063FFFF0083182101031021AF
++:104CC000ACA2001C8CC2000434C60100ACC2001CA3
++:104CD000240200020E000F5EA0C2000C1040007102
++:104CE0008FBF00103C0280008C4401003C03800018
++:104CF0008C6201F80440FFFE240200020A0013390E
++:104D00000000000034670080ACE500383466010024
++:104D10008CC2001C00A210231840000224A2FFFC39
++:104D2000ACC2001C30820001504000083C038008E7
++:104D30008CE2003C00A2102304430051240400052F
++:104D40008C62000410A2003E3C0380088C620004C8
++:104D500054A200548FBF00103C0208008C4200D8BF
++:104D600030420020104000063C028008346200807F
++:104D7000906300089042004C104300403C028008C1
++:104D80003443008034420100A040000C24020001A2
++:104D9000A46200143C0280008C4401003C038000AB
++:104DA0008C6201F80440FFFE24020002AC6401C0E2
++:104DB000A06201C43C021000AC6201F80A00137743
++:104DC0008FBF001024020005A120002714E2000A72
++:104DD0003C038008354301009062000D2C42000620
++:104DE000504000053C0380089062000D2442000101
++:104DF000A062000D3C03800834670080ACE50038F9
++:104E0000346601008CC2001C00A21023184000026E
++:104E100024A2FFFCACC2001C308200015040000AFA
++:104E20003C0380088CE2003C00A2102304410014E3
++:104E3000240400058C62000414A200033C038008D3
++:104E40000A00136E240400058C62000414A20015ED
++:104E50008FBF00103C0208008C4200D83042002076
++:104E60001040000A3C028008346200809063000811
++:104E70009042004C144300053C02800824040004C6
++:104E80008FBF00100A00110927BD001834430080AD
++:104E900034420100A040000C24020001A46200146E
++:104EA0008FBF001003E0000827BD00183C0B8008EE
++:104EB00027BDFFE83C028000AFBF00103442010074
++:104EC000356A00809044000A356901008C45001461
++:104ED0008D4800389123000C308400FF0105102319
++:104EE0001C4000B3306700FF2CE20006504000B1C8
++:104EF0008FBF00102402000100E2300430C2000322
++:104F00005440000800A8302330C2000C144000A117
++:104F100030C20030144000A38FBF00100A00143BC1
++:104F20000000000018C00024308200108D43001CD7
++:104F30008D42002C006818230043102B1040000FF6
++:104F4000000000008D22002000461021AD2200202C
++:104F50008D2200200043102B1440000B3C023FFF29
++:104F60008D22002000431023AD2200209542005CDA
++:104F70003042FFFF0A0013AF00621821AD2000206D
++:104F8000006618213C023FFF3446FFFF00C3102B90
++:104F90005440000100C018213C02800834420080C7
++:104FA00000651821AC43001CA0400024A04000274D
++:104FB0000A0013FD3C038008104000403C038008B9
++:104FC0008D42003C004810231840003D34670080AB
++:104FD0009142002424420001A14200249142002475
++:104FE0003C0308008C630024304200FF0043102B78
++:104FF000144000708FBF00108D22001C00A21023EF
++:105000001840006C000000008D6300049542005CB5
++:10501000006818233042FFFF0003184300021040CD
++:105020000043102A10400005014020218D62000439
++:10503000004810230A0013E0000210439542005C70
++:105040003042FFFF000210403C068008AC82002C7A
++:1050500034C5008094A2005C8CA4002C94A3005C56
++:105060003042FFFF00021040008220213063FFFF2A
++:105070000083182101031021ACA2001C8CC2000483
++:1050800034C60100ACC2001C240200020E000F5EF8
++:10509000A0C2000C104000478FBF00103C028000EF
++:1050A0008C4401003C0380008C6201F80440FFFE48
++:1050B000240200020A00142D000000003467008062
++:1050C000ACE50038346601008CC2001C00A210233D
++:1050D0001840000224A2FFFCACC2001C3082000178
++:1050E0005040000A3C0380088CE2003C00A21023E0
++:1050F00004430014240400058C62000414A200037D
++:105100003C0380080A00141F240400058C6200047C
++:1051100014A200288FBF00103C0208008C4200D867
++:10512000304200201040000A3C02800834620080B7
++:10513000906300089042004C144300053C02800834
++:10514000240400048FBF00100A00110927BD0018B5
++:105150003443008034420100A040000C24020001CE
++:10516000A46200143C0280008C4401003C038000D7
++:105170008C6201F80440FFFE24020002AC6401C00E
++:10518000A06201C43C021000AC6201F80A00143BAA
++:105190008FBF00108FBF0010010030210A00115A8C
++:1051A00027BD0018010030210A00129927BD001800
++:1051B0008FBF001003E0000827BD00183C038008E3
++:1051C0003464010024020003A082000C8C620004FD
++:1051D00003E00008AC82001C3C05800834A300807A
++:1051E0009062002734A501002406004324420001F8
++:1051F000A0620027906300273C0208008C42004810
++:10520000306300FF146200043C07602194A500EAAB
++:105210000A00090130A5FFFF03E0000800000000BC
++:1052200027BDFFE8AFBF00103C0280000E00144411
++:105230008C4401803C02800834430100A060000CD3
++:105240008C4200048FBF001027BD001803E0000847
++:10525000AC62001C27BDFFE03C028008AFBF001815
++:10526000AFB10014AFB000103445008034460100E7
++:105270003C0880008D09014090C3000C8CA4003CC8
++:105280008CA200381482003B306700FF9502007C3E
++:1052900090A30027146000093045FFFF2402000599
++:1052A00054E200083C04800890C2000D2442000132
++:1052B000A0C2000D0A00147F3C048008A0C0000DAD
++:1052C0003C048008348201009042000C2403000555
++:1052D000304200FF1443000A24A205DC348300801E
++:1052E000906200272C4200075040000524A20A00CB
++:1052F00090630027240200140062100400A2102111
++:105300003C108008361000803045FFFF012020212E
++:105310000E001444A60500149602005C8E030038AB
++:105320003C1180003042FFFF000210400062182153
++:10533000AE03001C0E0003328E24014092020025B1
++:1053400034420040A20200250E00033D8E2401409D
++:105350008E2401403C0380008C6201F80440FFFE73
++:1053600024020002AC6401C0A06201C43C0210002F
++:10537000AC6201F88FBF00188FB100148FB000101D
++:1053800003E0000827BD00203C0360103C02080039
++:1053900024420174AC62502C8C6250003C048000AA
++:1053A00034420080AC6250003C0208002442547C2D
++:1053B0003C010800AC2256003C020800244254384C
++:1053C0003C010800AC2256043C020002AC840008F8
++:1053D000AC82000C03E000082402000100A0302190
++:1053E0003C1C0800279C56083C0200023C050400B7
++:1053F00000852826008220260004102B2CA5000101
++:105400002C840001000210803C0308002463560035
++:105410000085202500431821108000030000102182
++:10542000AC6600002402000103E000080000000058
++:105430003C1C0800279C56083C0200023C05040066
++:1054400000852826008220260004102B2CA50001B0
++:105450002C840001000210803C03080024635600E5
++:105460000085202500431821108000050000102130
++:105470003C02080024425438AC62000024020001BF
++:1054800003E00008000000003C0200023C030400AE
++:1054900000821026008318262C4200012C63000194
++:1054A000004310251040000B000028213C1C080080
++:1054B000279C56083C0380008C62000824050001EC
++:1054C00000431025AC6200088C62000C00441025DB
++:1054D000AC62000C03E0000800A010213C1C080096
++:1054E000279C56083C0580008CA3000C0004202754
++:1054F000240200010064182403E00008ACA3000C9F
++:105500003C020002148200063C0560008CA208D018
++:105510002403FFFE0043102403E00008ACA208D0DF
++:105520003C02040014820005000000008CA208D098
++:105530002403FFFD00431024ACA208D003E00008C0
++:10554000000000003C02601A344200108C430080CE
++:1055500027BDFFF88C440084AFA3000093A3000094
++:10556000240200041462001AAFA4000493A20001F4
++:105570001040000797A300023062FFFC3C0380004C
++:10558000004310218C4200000A001536AFA200042F
++:105590003062FFFC3C03800000431021AC4400005B
++:1055A000A3A000003C0560008CA208D02403FFFEED
++:1055B0003C04601A00431024ACA208D08FA300045E
++:1055C0008FA2000034840010AC830084AC82008081
++:1055D00003E0000827BD000827BDFFE8AFBF0010AB
++:1055E0003C1C0800279C56083C0280008C43000CA1
++:1055F0008C420004004318243C0200021060001496
++:10560000006228243C0204003C04000210A00005B3
++:10561000006210243C0208008C4256000A00155B10
++:1056200000000000104000073C0404003C02080099
++:105630008C4256040040F809000000000A00156082
++:10564000000000000000000D3C1C0800279C5608CC
++:105650008FBF001003E0000827BD0018800802403B
++:1056600080080100800800808008000000000C8095
++:105670000000320008000E9808000EF408000F88A1
++:1056800008001028080010748008010080080080BD
++:10569000800800000A000028000000000000000050
++:1056A0000000000D6370362E322E316200000000C3
++:1056B00006020104000000000000000000000000DD
++:1056C000000000000000000038003C000000000066
++:1056D00000000000000000000000000000000020AA
++:1056E00000000000000000000000000000000000BA
++:1056F00000000000000000000000000000000000AA
++:10570000000000000000000021003800000000013F
++:105710000000002B000000000000000400030D400A
++:105720000000000000000000000000000000000079
++:105730000000000000000000100000030000000056
++:105740000000000D0000000D3C020800244259AC8E
++:105750003C03080024635BF4AC4000000043202BB2
++:105760001480FFFD244200043C1D080037BD9FFC4F
++:1057700003A0F0213C100800261000A03C1C0800EB
++:10578000279C59AC0E0002F6000000000000000D3E
++:1057900027BDFFB4AFA10000AFA20004AFA3000873
++:1057A000AFA4000CAFA50010AFA60014AFA700185F
++:1057B000AFA8001CAFA90020AFAA0024AFAB0028FF
++:1057C000AFAC002CAFAD0030AFAE0034AFAF00389F
++:1057D000AFB8003CAFB90040AFBC0044AFBF004819
++:1057E0000E000820000000008FBF00488FBC00445E
++:1057F0008FB900408FB8003C8FAF00388FAE0034B7
++:105800008FAD00308FAC002C8FAB00288FAA002406
++:105810008FA900208FA8001C8FA700188FA6001446
++:105820008FA500108FA4000C8FA300088FA2000486
++:105830008FA1000027BD004C3C1B60188F7A5030B0
++:10584000377B502803400008AF7A000000A01821E1
++:1058500000801021008028213C0460003C0760008B
++:105860002406000810600006348420788C42000072
++:10587000ACE220088C63000003E00008ACE3200CDD
++:105880000A000F8100000000240300403C02600079
++:1058900003E00008AC4320003C0760008F86000452
++:1058A0008CE520740086102100A2182B14600007DC
++:1058B000000028218F8AFDA024050001A1440013C7
++:1058C0008F89000401244021AF88000403E0000810
++:1058D00000A010218F84FDA08F8500049086001306
++:1058E00030C300FF00A31023AF82000403E00008D0
++:1058F000A08000138F84FDA027BDFFE8AFB000108B
++:10590000AFBF001490890011908700112402002875
++:10591000312800FF3906002830E300FF2485002CE1
++:105920002CD00001106200162484001C0E00006EB2
++:10593000000000008F8FFDA03C05600024020204DF
++:1059400095EE003E95ED003C000E5C0031ACFFFF93
++:10595000016C5025ACAA2010520000012402000462
++:10596000ACA22000000000000000000000000000C9
++:105970008FBF00148FB0001003E0000827BD00188F
++:105980000A0000A6000028218F85FDA027BDFFD8B2
++:10599000AFBF0020AFB3001CAFB20018AFB100140E
++:1059A000AFB000100080982190A4001124B0001C1A
++:1059B00024B1002C308300FF386200280E000090D4
++:1059C0002C5200010E00009800000000020020216F
++:1059D0001240000202202821000028210E00006E43
++:1059E000000000008F8DFDA03C0880003C05600099
++:1059F00095AC003E95AB003C02683025000C4C0095
++:105A0000316AFFFF012A3825ACA7201024020202C8
++:105A1000ACA6201452400001240200028FBF0020D7
++:105A20008FB3001C8FB200188FB100148FB000101C
++:105A300027BD002803E00008ACA2200027BDFFE03E
++:105A4000AFB20018AFB10014AFB00010AFBF001C70
++:105A50003C1160008E2320748F82000430D0FFFF41
++:105A600030F2FFFF1062000C2406008F0E00006E63
++:105A7000000000003C06801F0010440034C5FF00F9
++:105A80000112382524040002AE2720100000302126
++:105A9000AE252014AE2420008FBF001C8FB200184A
++:105AA0008FB100148FB0001000C0102103E0000877
++:105AB00027BD002027BDFFE0AFB0001030D0FFFFB2
++:105AC000AFBF0018AFB100140E00006E30F1FFFF41
++:105AD00000102400009180253C036000AC70201071
++:105AE0008FBF00188FB100148FB000102402000483
++:105AF000AC62200027BD002003E000080000102158
++:105B000027BDFFE03C046018AFBF0018AFB1001420
++:105B1000AFB000108C8850002403FF7F34028071E6
++:105B20000103382434E5380C241F00313C1980006F
++:105B3000AC8550003C11800AAC8253BCAF3F0008DA
++:105B40000E00054CAF9100400E00050A3C116000AC
++:105B50000E00007D000000008E3008083C0F570941
++:105B60002418FFF00218602435EEE00035EDF00057
++:105B7000018E5026018D58262D4600012D69000109
++:105B8000AF86004C0E000D09AF8900503C06601630
++:105B90008CC700003C0860148D0500A03C03FFFF8B
++:105BA00000E320243C02535300052FC2108200550D
++:105BB00034D07C00960201F2A780006C10400003F4
++:105BC000A780007C384B1E1EA78B006C960201F844
++:105BD000104000048F8D0050384C1E1EA78C007C96
++:105BE0008F8D005011A000058F83004C240E0020E3
++:105BF000A78E007CA78E006C8F83004C1060000580
++:105C00009785007C240F0020A78F007CA78F006C55
++:105C10009785007C2CB8008153000001240500808A
++:105C20009784006C2C91040152200001240404008C
++:105C30001060000B3C0260008FBF00188FB1001491
++:105C40008FB0001027BD0020A784006CA785007CC2
++:105C5000A380007EA780007403E00008A780009264
++:105C60008C4704382419103C30FFFFFF13F9000360
++:105C700030A8FFFF1100004624030050A380007EDF
++:105C80009386007E50C00024A785007CA780007CFE
++:105C90009798007CA780006CA7800074A780009272
++:105CA0003C010800AC3800800E00078700000000AF
++:105CB0003C0F60008DED0808240EFFF03C0B600ED9
++:105CC000260C0388356A00100000482100002821B6
++:105CD00001AE20243C105709AF8C0010AF8A004859
++:105CE000AF89001810900023AF8500148FBF0018F3
++:105CF0008FB100148FB0001027BD002003E0000812
++:105D0000AF80005400055080014648218D260004D4
++:105D10000A00014800D180219798007CA784006C7C
++:105D2000A7800074A78000923C010800AC38008076
++:105D30000E000787000000003C0F60008DED080892
++:105D4000240EFFF03C0B600E260C0388356A001011
++:105D5000000048210000282101AE20243C105709F2
++:105D6000AF8C0010AF8A0048AF8900181490FFDF95
++:105D7000AF85001424110001AF9100548FBF0018AB
++:105D80008FB100148FB0001003E0000827BD002081
++:105D90000A00017BA383007E3083FFFF8F880040D1
++:105DA0008F87003C000321403C0580003C020050EE
++:105DB000008248253C0660003C0A010034AC040027
++:105DC0008CCD08E001AA58241160000500000000F5
++:105DD0008CCF08E024E7000101EA7025ACCE08E092
++:105DE0008D19001001805821ACB900388D180014AD
++:105DF000ACB8003CACA9003000000000000000007E
++:105E00000000000000000000000000000000000092
++:105E100000000000000000003C0380008C640000D3
++:105E2000308200201040FFFD3C0F60008DED08E047
++:105E30003C0E010001AE18241460FFE100000000D8
++:105E4000AF87003C03E00008AF8B00588F8500400F
++:105E5000240BFFF03C06800094A7001A8CA90024B4
++:105E600030ECFFFF000C38C000EB5024012A402129
++:105E7000ACC8003C8CA400248CC3003C00831023DD
++:105E800018400033000000008CAD002025A2000166
++:105E90003C0F0050ACC2003835EE00103C068000CC
++:105EA000ACCE003000000000000000000000000048
++:105EB00000000000000000000000000000000000E2
++:105EC000000000003C0480008C9900003338002062
++:105ED0001300FFFD30E20008104000173C0980006D
++:105EE0008C880408ACA800108C83040CACA30014AC
++:105EF0003C1900203C188000AF19003094AE001807
++:105F000094AF001C01CF3021A4A6001894AD001A54
++:105F100025A70001A4A7001A94AB001A94AC001E98
++:105F2000118B00030000000003E0000800000000E7
++:105F300003E00008A4A0001A8D2A0400ACAA0010F7
++:105F40008D240404ACA400140A0002183C1900209B
++:105F50008CA200200A0002003C0F00500A0001EE53
++:105F60000000000027BDFFE8AFBF00100E000232A6
++:105F7000000000008F8900408FBF00103C038000AC
++:105F8000A520000A9528000A9527000427BD0018BF
++:105F90003105FFFF30E6000F0006150000A22025A6
++:105FA00003E00008AC6400803C0508008CA50020DC
++:105FB0008F83000C27BDFFE8AFB00010AFBF001407
++:105FC00010A300100000802124040001020430040A
++:105FD00000A6202400C3102450440006261000010F
++:105FE000001018802787FDA41480000A006718217C
++:105FF000261000012E0900025520FFF38F83000CAC
++:10600000AF85000C8FBF00148FB0001003E00008B4
++:1060100027BD00188C6800003C058000ACA8002457
++:106020000E000234261000013C0508008CA500205B
++:106030000A0002592E0900022405000100851804F7
++:106040003C0408008C84002027BDFFC8AFBF00348B
++:1060500000831024AFBE0030AFB7002CAFB60028CD
++:10606000AFB50024AFB40020AFB3001CAFB200182E
++:10607000AFB1001410400051AFB000108F84004049
++:10608000948700069488000A00E8302330D5FFFF8B
++:1060900012A0004B8FBF0034948B0018948C000A20
++:1060A000016C50233142FFFF02A2482B1520000251
++:1060B00002A02021004020212C8F000515E00002C5
++:1060C00000809821241300040E0001C102602021E9
++:1060D0008F87004002609021AF80004494F4000A52
++:1060E000026080211260004E3291FFFF3C1670006A
++:1060F0003C1440003C1E20003C1760008F99005863
++:106100008F380000031618241074004F0283F82BF8
++:1061100017E0003600000000107E00478F86004424
++:1061200014C0003A2403000102031023022320219B
++:106130003050FFFF1600FFF13091FFFF8F870040C6
++:106140003C1100203C108000AE11003094EB000A9E
++:106150003C178000024B5021A4EA000A94E9000A8F
++:1061600094E800043123FFFF3106000F00062D00E4
++:106170000065F025AEFE008094F3000A94F6001846
++:1061800012D30036001221408CFF00148CF4001052
++:1061900003E468210000C02101A4782B029870213B
++:1061A00001CF6021ACED0014ACEC001002B238233A
++:1061B00030F5FFFF16A0FFB88F8400408FBF00347A
++:1061C0008FBE00308FB7002C8FB600288FB500240B
++:1061D0008FB400208FB3001C8FB200188FB1001451
++:1061E0008FB0001003E0000827BD00381477FFCC03
++:1061F0008F8600440E000EE202002021004018218C
++:106200008F86004410C0FFC9020310230270702360
++:106210008F87004001C368210A0002E431B2FFFF0A
++:106220008F86004414C0FFC93C1100203C10800040
++:106230000A0002AEAE1100300E00046602002021FA
++:106240000A0002DB00401821020020210E0009395B
++:10625000022028210A0002DB004018210E0001EE76
++:10626000000000000A0002C702B2382327BDFFC8A1
++:10627000AFB7002CAFB60028AFB50024AFB40020F4
++:10628000AFB3001CAFB20018AFB10014AFB0001034
++:10629000AFBF00300E00011B241300013C047FFF40
++:1062A0003C0380083C0220003C010800AC20007048
++:1062B0003496FFFF34770080345200033C1512C03F
++:1062C000241400013C1080002411FF800E000245C0
++:1062D000000000008F8700488F8B00188F89001402
++:1062E0008CEA00EC8CE800E8014B302B01092823F4
++:1062F00000A6102314400006014B18231440000E82
++:106300003C05800002A3602B1180000B0000000000
++:106310003C0560008CEE00EC8CED00E88CA4180CC1
++:10632000AF8E001804800053AF8D00148F8F0010C3
++:10633000ADF400003C0580008CBF00003BF900017B
++:10634000333800011700FFE13C0380008C6201003C
++:1063500024060C0010460009000000008C680100B3
++:106360002D043080548000103C0480008C690100B2
++:106370002D2331811060000C3C0480008CAA0100A8
++:1063800011460004000020218CA6010024C5FF81D5
++:1063900030A400FF8E0B01000E000269AE0B00243A
++:1063A0000A00034F3C0480008C8D01002DAC3300AB
++:1063B00011800022000000003C0708008CE70098D4
++:1063C00024EE00013C010800AC2E00983C04800043
++:1063D0008C8201001440000300000000566000148D
++:1063E0003C0440008C9F01008C9801000000982123
++:1063F00003F1C82400193940330F007F00EF7025E6
++:1064000001D26825AC8D08308C8C01008C85010090
++:10641000258B0100017130240006514030A3007F1C
++:106420000143482501324025AC8808303C04400037
++:10643000AE0401380A00030E000000008C99010030
++:10644000240F0020AC99002092F80000330300FFD5
++:10645000106F000C241F0050547FFFDD3C048000AF
++:106460008C8401000E00154E000000000A00034F4E
++:106470003C04800000963824ACA7180C0A000327BF
++:106480008F8F00108C8501000E0008F72404008017
++:106490000A00034F3C04800000A4102B24030001D9
++:1064A00010400009000030210005284000A4102BF6
++:1064B00004A00003000318405440FFFC00052840DE
++:1064C0005060000A0004182B0085382B54E00004AB
++:1064D0000003184200C33025008520230003184222
++:1064E0001460FFF9000528420004182B03E000089F
++:1064F00000C310213084FFFF30C600FF3C0780003E
++:106500008CE201B80440FFFE00064C000124302557
++:106510003C08200000C820253C031000ACE00180AE
++:10652000ACE50184ACE4018803E00008ACE301B809
++:106530003C0660008CC5201C2402FFF03083020062
++:10654000308601001060000E00A2282434A500014E
++:106550003087300010E0000530830C0034A50004C3
++:106560003C04600003E00008AC85201C1060FFFDC7
++:106570003C04600034A5000803E00008AC85201C42
++:1065800054C0FFF334A500020A0003B03087300086
++:1065900027BDFFE8AFB00010AFBF00143C0760009C
++:1065A000240600021080001100A080218F83005873
++:1065B0000E0003A78C6400188F8200580000202171
++:1065C000240600018C45000C0E000398000000001A
++:1065D0001600000224020003000010218FBF0014E7
++:1065E0008FB0001003E0000827BD00188CE8201CC5
++:1065F0002409FFF001092824ACE5201C8F870058EE
++:106600000A0003CD8CE5000C3C02600E00804021A6
++:1066100034460100240900180000000000000000BA
++:10662000000000003C0A00503C0380003547020097
++:10663000AC68003834640400AC65003CAC670030E2
++:106640008C6C0000318B00201160FFFD2407FFFFE0
++:106650002403007F8C8D00002463FFFF248400044A
++:10666000ACCD00001467FFFB24C60004000000004E
++:10667000000000000000000024A402000085282B78
++:106680003C0300203C0E80002529FFFF010540212E
++:10669000ADC300301520FFE00080282103E0000892
++:1066A000000000008F82005827BDFFD8AFB3001C48
++:1066B000AFBF0020AFB20018AFB10014AFB00010F0
++:1066C00094460002008098218C5200182CC300814F
++:1066D0008C4800048C4700088C51000C8C49001039
++:1066E000106000078C4A00142CC4000414800013AE
++:1066F00030EB000730C5000310A0001000000000C0
++:106700002410008B02002021022028210E00039873
++:10671000240600031660000224020003000010217A
++:106720008FBF00208FB3001C8FB200188FB10014F0
++:106730008FB0001003E0000827BD00281560FFF1AE
++:106740002410008B3C0C80003C030020241F00011F
++:10675000AD830030AF9F0044000000000000000047
++:10676000000000002419FFF024D8000F031978243A
++:106770003C1000D0AD88003801F0702524CD000316
++:106780003C08600EAD87003C35850400AD8E0030BE
++:10679000000D38823504003C3C0380008C6B000007
++:1067A000316200201040FFFD0000000010E00008F2
++:1067B00024E3FFFF2407FFFF8CA800002463FFFFF2
++:1067C00024A50004AC8800001467FFFB24840004A7
++:1067D0003C05600EACA60038000000000000000080
++:1067E000000000008F8600543C0400203C0780001D
++:1067F000ACE4003054C000060120202102402021DA
++:106800000E0003A7000080210A00041D02002021C1
++:106810000E0003DD01402821024020210E0003A7C5
++:10682000000080210A00041D0200202127BDFFE096
++:10683000AFB200183092FFFFAFB10014AFBF001C21
++:10684000AFB000101640000D000088210A0004932C
++:106850000220102124050003508500278CE5000C40
++:106860000000000D262800013111FFFF24E2002066
++:106870000232802B12000019AF8200588F82004430
++:10688000144000168F8700583C0670003C0320001F
++:106890008CE5000000A62024148300108F84006083
++:1068A000000544023C09800000A980241480FFE90F
++:1068B000310600FF2CCA000B5140FFEB26280001D7
++:1068C000000668803C0E080025CE575801AE6021B6
++:1068D0008D8B0000016000080000000002201021E4
++:1068E0008FBF001C8FB200188FB100148FB0001042
++:1068F00003E0000827BD00200E0003982404008454
++:106900001600FFD88F8700580A000474AF8000601B
++:10691000020028210E0003BF240400018F870058C5
++:106920000A000474AF820060020028210E0003BF39
++:10693000000020210A0004A38F8700580E000404E1
++:10694000020020218F8700580A000474AF82006083
++:1069500030AFFFFF000F19C03C0480008C9001B8DD
++:106960000600FFFE3C1920043C181000AC83018097
++:10697000AC800184AC990188AC9801B80A00047518
++:106980002628000190E2000390E30002000020218D
++:106990000002FE0000033A0000FF2825240600083C
++:1069A0000E000398000000001600FFDC2402000324
++:1069B0008F870058000010210A000474AF82006025
++:1069C00090E8000200002021240600090A0004C308
++:1069D00000082E0090E4000C240900FF308500FF21
++:1069E00010A900150000302190F9000290F8000372
++:1069F000308F00FF94EB000400196E000018740043
++:106A0000000F62000186202501AE5025014B28258C
++:106A10003084FF8B0A0004C32406000A90E30002BE
++:106A200090FF0004000020210003360000DF28252D
++:106A30000A0004C32406000B0A0004D52406008BB8
++:106A4000000449C23127003F000443423C02800059
++:106A500000082040240316802CE60020AC43002CC4
++:106A600024EAFFE02482000114C0000330A900FFE3
++:106A700000801021314700FF000260803C0D800043
++:106A8000240A0001018D20213C0B000E00EA28049D
++:106A9000008B302111200005000538278CCE000026
++:106AA00001C5382503E00008ACC700008CD8000001
++:106AB0000307782403E00008ACCF000027BDFFE007
++:106AC000AFB10014AFB00010AFBF00183C076000BA
++:106AD0008CE408083402F0003C1160003083F000C0
++:106AE000240501C03C04800E000030211062000625
++:106AF000241000018CEA08083149F0003928E00030
++:106B00000008382B000780403C0D0200AE2D081411
++:106B1000240C16803C0B80008E2744000E000F8B47
++:106B2000AD6C002C120000043C02169124050001FB
++:106B3000120500103C023D2C345800E0AE384408E9
++:106B40003C1108008E31007C8FBF00183C066000AD
++:106B500000118540360F16808FB100148FB00010E1
++:106B60003C0E020027BD0020ACCF442003E000080B
++:106B7000ACCE08103C0218DA345800E0AE384408B5
++:106B80003C1108008E31007C8FBF00183C0660006D
++:106B900000118540360F16808FB100148FB00010A1
++:106BA0003C0E020027BD0020ACCF442003E00008CB
++:106BB000ACCE08100A0004EB240500010A0004EB27
++:106BC0000000282124020400A7820024A780001CC2
++:106BD000000020213C06080024C65A582405FFFF67
++:106BE00024890001000440803124FFFF01061821A0
++:106BF0002C87002014E0FFFAAC6500002404040098
++:106C0000A7840026A780001E000020213C06080063
++:106C100024C65AD82405FFFF248D0001000460809B
++:106C200031A4FFFF018658212C8A00201540FFFA6D
++:106C3000AD650000A7800028A7800020A780002263
++:106C4000000020213C06080024C65B582405FFFFF5
++:106C5000249900010004C0803324FFFF030678213B
++:106C60002C8E000415C0FFFAADE500003C05600065
++:106C70008CA73D002403E08F00E31024344601403C
++:106C800003E00008ACA63D002487007F000731C266
++:106C900024C5FFFF000518C2246400013082FFFFF5
++:106CA000000238C0A78400303C010800AC27003047
++:106CB000AF80002C0000282100002021000030219E
++:106CC0002489000100A728213124FFFF2CA81701E7
++:106CD000110000032C8300801460FFF924C600011A
++:106CE00000C02821AF86002C10C0001DA786002AF6
++:106CF00024CAFFFF000A11423C08080025085B581F
++:106D00001040000A00002021004030212407FFFF2E
++:106D1000248E00010004688031C4FFFF01A86021B7
++:106D20000086582B1560FFFAAD87000030A2001FC7
++:106D30005040000800043080240300010043C804D0
++:106D400000041080004878212738FFFF03E0000886
++:106D5000ADF8000000C820212405FFFFAC8500002D
++:106D600003E000080000000030A5FFFF30C6FFFF71
++:106D700030A8001F0080602130E700FF0005294295
++:106D80000000502110C0001D24090001240B000147
++:106D900025180001010B2004330800FF0126782686
++:106DA000390E00202DED00012DC2000101A2182591
++:106DB0001060000D014450250005C880032C4021BF
++:106DC0000100182110E0000F000A20278D040000A8
++:106DD000008A1825AD03000024AD00010000402109
++:106DE0000000502131A5FFFF252E000131C9FFFF12
++:106DF00000C9102B1040FFE72518000103E0000830
++:106E0000000000008D0A0000014440240A0005D162
++:106E1000AC68000027BDFFE830A5FFFF30C6FFFFCC
++:106E2000AFB00010AFBF001430E7FFFF00005021EB
++:106E30003410FFFF0000602124AF001F00C0482174
++:106E4000241800012419002005E0001601E010219B
++:106E50000002F943019F682A0009702B01AE40240B
++:106E600011000017000C18800064102110E00005CC
++:106E70008C4B000000F840040008382301675824B8
++:106E800000003821154000410000402155600016E7
++:106E90003169FFFF258B0001316CFFFF05E1FFEC3D
++:106EA00001E0102124A2003E0002F943019F682A5C
++:106EB0000009702B01AE40241500FFEB000C188078
++:106EC000154600053402FFFF020028210E0005B51B
++:106ED00000003821020010218FBF00148FB0001075
++:106EE00003E0000827BD00181520000301601821E9
++:106EF000000B1C0224080010306A00FF154000053A
++:106F0000306E000F250D000800031A0231A800FFA3
++:106F1000306E000F15C00005307F000325100004FF
++:106F200000031902320800FF307F000317E000055C
++:106F3000386900012502000200031882304800FF72
++:106F4000386900013123000110600004310300FFA3
++:106F5000250A0001314800FF310300FF000C6940A1
++:106F600001A34021240A000110CAFFD53110FFFF00
++:106F7000246E000131C800FF1119FFC638C9000195
++:106F80002D1F002053E0001C258B0001240D000163
++:106F90000A000648240E002051460017258B0001E8
++:106FA00025090001312800FF2D0900205120001281
++:106FB000258B000125430001010D5004014B1024D5
++:106FC000250900011440FFF4306AFFFF3127FFFF5D
++:106FD00010EE000C2582FFFF304CFFFF0000502117
++:106FE0003410FFFF312800FF2D0900205520FFF24B
++:106FF00025430001258B0001014648260A000602B0
++:10700000316CFFFF00003821000050210A000654B7
++:107010003410FFFF27BDFFD8AFB0001030F0FFFFE6
++:10702000AFB10014001039423211FFE000071080A8
++:10703000AFB3001C00B1282330D3FFFFAFB200185C
++:1070400030A5FFFF00809021026030210044202104
++:10705000AFBF00200E0005E03207001F022288218A
++:107060003403FFFF0240202102002821026030216A
++:1070700000003821104300093231FFFF02201021A7
++:107080008FBF00208FB3001C8FB200188FB1001487
++:107090008FB0001003E0000827BD00280E0005E0B7
++:1070A0000000000000408821022010218FBF002036
++:1070B0008FB3001C8FB200188FB100148FB0001076
++:1070C00003E0000827BD0028000424003C03600002
++:1070D000AC603D0810A00002348210063482101605
++:1070E00003E00008AC623D0427BDFFE0AFB0001034
++:1070F000309000FF2E020006AFBF001810400008BD
++:10710000AFB10014001030803C03080024635784A2
++:1071100000C328218CA400000080000800000000AB
++:10712000000020218FBF00188FB100148FB0001015
++:107130000080102103E0000827BD00209791002A5D
++:1071400016200051000020213C020800904200332C
++:107150000A0006BB00000000978D002615A0003134
++:10716000000020210A0006BB2402000897870024A3
++:1071700014E0001A00001821006020212402000100
++:107180001080FFE98FBF0018000429C2004530219C
++:1071900000A6582B1160FFE43C0880003C0720004B
++:1071A000000569C001A76025AD0C00203C038008E4
++:1071B0002402001F2442FFFFAC6000000441FFFDD9
++:1071C0002463000424A5000100A6702B15C0FFF560
++:1071D000000569C00A0006A58FBF00189787001C2C
++:1071E0003C04080024845A58240504000E0006605C
++:1071F00024060001978B002424440001308AFFFFFD
++:107200002569FFFF2D48040000402821150000409B
++:10721000A789002424AC3800000C19C00A0006B964
++:10722000A780001C9787001E3C04080024845AD8BD
++:10723000240504000E00066024060001979900262C
++:10724000244400013098FFFF272FFFFF2F0E04007A
++:107250000040882115C0002CA78F0026A780001EA3
++:107260003A020003262401003084FFFF0E00068D41
++:107270002C4500010011F8C027F00100001021C0CA
++:107280000A0006BB240200089785002E978700227B
++:107290003C04080024845B580E00066024060001AC
++:1072A0009787002A8F89002C2445000130A8FFFF12
++:1072B00024E3FFFF0109302B0040802114C0001897
++:1072C000A783002AA7800022978500300E000F7543
++:1072D00002002021244A05003144FFFF0E00068DE4
++:1072E000240500013C05080094A500320E000F752E
++:1072F00002002021244521003C0208009042003376
++:107300000A0006BB000521C00A0006F3A784001E80
++:1073100024AC3800000C19C00A0006B9A784001C70
++:107320000A00070DA7850022308400FF27BDFFE873
++:107330002C820006AFBF0014AFB000101040001543
++:1073400000A03821000440803C0308002463579CBF
++:10735000010328218CA40000008000080000000028
++:1073600024CC007F000751C2000C59C23170FFFFCE
++:107370002547C40030E5FFFF2784001C02003021B0
++:107380000E0005B52407000197860028020620217B
++:10739000A78400288FBF00148FB0001003E00008FE
++:1073A00027BD00183C0508008CA50030000779C2F5
++:1073B0000E00038125E4DF003045FFFF3C04080098
++:1073C00024845B58240600010E0005B52407000143
++:1073D000978E002A8FBF00148FB0001025CD0001BA
++:1073E00027BD001803E00008A78D002A0007C9C2C6
++:1073F0002738FF00001878C231F0FFFF3C04080076
++:1074000024845AD802002821240600010E0005B564
++:1074100024070001978D0026260E0100000E84002F
++:1074200025AC00013C0B6000A78C0026AD603D0838
++:1074300036040006000030213C0760008CE23D0469
++:10744000305F000617E0FFFD24C9000100061B00A5
++:10745000312600FF006440252CC50004ACE83D0443
++:1074600014A0FFF68FBF00148FB0001003E00008D7
++:1074700027BD0018000751C22549C8002406000195
++:10748000240700013C04080024845A580E0005B566
++:107490003125FFFF978700248FBF00148FB00010A5
++:1074A00024E6000127BD001803E00008A786002499
++:1074B0003C0660183C090800252900FCACC9502C8A
++:1074C0008CC850003C0580003C020002350700805B
++:1074D000ACC750003C04080024841FE03C030800B3
++:1074E00024631F98ACA50008ACA2000C3C01080066
++:1074F000AC2459A43C010800AC2359A803E00008BF
++:107500002402000100A030213C1C0800279C59AC3B
++:107510003C0C04003C0B0002008B3826008C4026FB
++:107520002CE200010007502B2D050001000A4880C5
++:107530003C030800246359A4004520250123182199
++:107540001080000300001021AC660000240200013E
++:1075500003E00008000000003C1C0800279C59AC18
++:107560003C0B04003C0A0002008A3026008B3826BF
++:107570002CC200010006482B2CE5000100094080C8
++:107580003C030800246359A4004520250103182169
++:1075900010800005000010213C0C0800258C1F986D
++:1075A000AC6C00002402000103E0000800000000B1
++:1075B0003C0900023C080400008830260089382677
++:1075C0002CC30001008028212CE400010083102539
++:1075D0001040000B000030213C1C0800279C59ACD7
++:1075E0003C0A80008D4E00082406000101CA68256F
++:1075F000AD4D00088D4C000C01855825AD4B000C9D
++:1076000003E0000800C010213C1C0800279C59AC76
++:107610003C0580008CA6000C0004202724020001F9
++:1076200000C4182403E00008ACA3000C3C020002D4
++:107630001082000B3C0560003C070400108700032B
++:107640000000000003E00008000000008CA908D042
++:10765000240AFFFD012A402403E00008ACA808D05A
++:107660008CA408D02406FFFE0086182403E000083E
++:10767000ACA308D03C05601A34A600108CC300806F
++:1076800027BDFFF88CC50084AFA3000093A40000C1
++:107690002402001010820003AFA5000403E00008DC
++:1076A00027BD000893A7000114E0001497AC000266
++:1076B00097B800023C0F8000330EFFFC01CF682119
++:1076C000ADA50000A3A000003C0660008CC708D058
++:1076D0002408FFFE3C04601A00E82824ACC508D04A
++:1076E0008FA300048FA200003499001027BD00086A
++:1076F000AF22008003E00008AF2300843C0B800031
++:10770000318AFFFC014B48218D2800000A00080C3B
++:10771000AFA8000427BDFFE8AFBF00103C1C080065
++:10772000279C59AC3C0580008CA4000C8CA2000462
++:107730003C0300020044282410A0000A00A31824DF
++:107740003C0604003C0400021460000900A610245A
++:107750001440000F3C0404000000000D3C1C080015
++:10776000279C59AC8FBF001003E0000827BD00180C
++:107770003C0208008C4259A40040F80900000000B7
++:107780003C1C0800279C59AC0A0008358FBF00102C
++:107790003C0208008C4259A80040F8090000000093
++:1077A0000A00083B000000003C0880008D0201B880
++:1077B0000440FFFE35090180AD2400003C031000A9
++:1077C00024040040AD250004A1240008A1260009DE
++:1077D000A527000A03E00008AD0301B83084FFFFCD
++:1077E0000080382130A5FFFF000020210A00084555
++:1077F000240600803087FFFF8CA400002406003898
++:107800000A000845000028218F8300788F860070C9
++:107810001066000B008040213C07080024E75B68ED
++:10782000000328C000A710218C440000246300013D
++:10783000108800053063000F5466FFFA000328C06B
++:1078400003E00008000010213C07080024E75B6CFF
++:1078500000A7302103E000088CC200003C03900028
++:1078600034620001008220253C038000AC640020CB
++:107870008C65002004A0FFFE0000000003E000086B
++:10788000000000003C0280003443000100832025FA
++:1078900003E00008AC44002027BDFFE0AFB10014B6
++:1078A0003091FFFFAFB00010AFBF001812200013DF
++:1078B00000A080218CA20000240400022406020003
++:1078C0001040000F004028210E0007250000000096
++:1078D00000001021AE000000022038218FBF0018E8
++:1078E0008FB100148FB0001000402021000028212B
++:1078F000000030210A00084527BD00208CA20000AE
++:10790000022038218FBF00188FB100148FB00010F3
++:107910000040202100002821000030210A000845F5
++:1079200027BD002000A010213087FFFF8CA5000498
++:107930008C4400000A000845240600068F83FD9C45
++:1079400027BDFFE8AFBF0014AFB00010906700087C
++:10795000008010210080282130E600400000202116
++:1079600010C000088C5000000E0000BD0200202155
++:10797000020020218FBF00148FB000100A000548BC
++:1079800027BD00180E0008A4000000000E0000BD76
++:1079900002002021020020218FBF00148FB00010B0
++:1079A0000A00054827BD001827BDFFE0AFB0001052
++:1079B0008F90FD9CAFBF001CAFB20018AFB1001498
++:1079C00092060001008088210E00087230D2000467
++:1079D00092040005001129C2A6050000348300406E
++:1079E000A20300050E00087C022020210E00054A9B
++:1079F0000220202124020001AE02000C02202821D6
++:107A0000A602001024040002A602001224060200AE
++:107A1000A60200140E000725A60200161640000F4D
++:107A20008FBF001C978C00743C0B08008D6B007896
++:107A30002588FFFF3109FFFF256A0001012A382B45
++:107A400010E00006A78800743C0F6006240E0016A4
++:107A500035ED0010ADAE00508FBF001C8FB2001886
++:107A60008FB100148FB0001003E0000827BD002084
++:107A700027BDFFE0AFB10014AFBF0018AFB00010DA
++:107A80001080000400A088212402008010820007DA
++:107A9000000000000000000D8FBF00188FB100141F
++:107AA0008FB0001003E0000827BD00200E00087210
++:107AB00000A020218F86FD9C0220202190C500057A
++:107AC0000E00087C30B000FF2403003E1603FFF1D7
++:107AD0003C0680008CC401780480FFFE34C801405D
++:107AE000240900073C071000AD11000002202021EE
++:107AF000A10900048FBF00188FB100148FB00010CF
++:107B0000ACC701780A0008C527BD002027BDFFE0EB
++:107B1000AFB00010AFBF0018AFB100143C10800030
++:107B20008E110020000000000E00054AAE04002067
++:107B3000AE1100208FBF00188FB100148FB000105D
++:107B400003E0000827BD00203084FFFF00803821BB
++:107B50002406003500A020210A0008450000282145
++:107B60003084FFFF008038212406003600A0202149
++:107B70000A0008450000282127BDFFD0AFB500242A
++:107B80003095FFFFAFB60028AFB40020AFBF002C88
++:107B9000AFB3001CAFB20018AFB10014AFB000100B
++:107BA00030B6FFFF12A000270000A0218F920058DE
++:107BB0008E4300003C0680002402004000033E0289
++:107BC00000032C0230E4007F006698241482001D1C
++:107BD00030A500FF8F8300682C68000A1100001098
++:107BE0008F8D0044000358803C0C0800258C57B84A
++:107BF000016C50218D4900000120000800000000A8
++:107C000002D4302130C5FFFF0E0008522404008446
++:107C1000166000028F920058AF8000688F8D00447C
++:107C20002659002026980001032090213314FFFFDD
++:107C300015A00004AF9900580295202B1480FFDC9A
++:107C400000000000028010218FBF002C8FB600289A
++:107C50008FB500248FB400208FB3001C8FB20018A2
++:107C60008FB100148FB0001003E0000827BD003072
++:107C70002407003414A70149000000009247000EB9
++:107C80008F9FFDA08F90FD9C24181600A3E700197C
++:107C90009242000D3C0880003C07800CA3E20018D3
++:107CA000964A00123C0D60003C117FFFA60A005C62
++:107CB000964400103623FFFF240200053099FFFF91
++:107CC000AE1900548E46001CAD1800288CEF000041
++:107CD0008DAE444801E6482601C93021AE06003881
++:107CE0008E05003824CB00013C0E7F00AE05003C21
++:107CF0008E0C003CAFEC0004AE0B00208E13002075
++:107D0000AE13001CA3E0001BAE03002CA3E2001284
++:107D10008E4A001424130050AE0A00348E0400343E
++:107D2000AFE400148E590018AE1900489258000CA8
++:107D3000A218004E920D000835AF0020A20F0008D7
++:107D40008E090018012E282434AC4000AE0C001817
++:107D5000920B0000317200FF1253027F2403FF8058
++:107D60003C04080024845BE80E0008AA0000000020
++:107D70003C1108008E315BE80E00087202202021C1
++:107D80002405000424080001A2050025022020216A
++:107D90000E00087CA20800053C0580008CB001782C
++:107DA0000600FFFE8F92005834AE0140240F0002FF
++:107DB0003C091000ADD10000A1CF0004ACA90178AE
++:107DC0000A000962AF8000682CAD003751A0FF9413
++:107DD0008F8D0044000580803C110800263157E05B
++:107DE000021178218DEE000001C0000800000000A3
++:107DF0002411000414B1008C3C0780003C080800EA
++:107E00008D085BE88F86FD9CACE800208E4500085D
++:107E10008F99FDA0240D0050ACC500308E4C000899
++:107E2000ACCC00508E4B000CACCB00348E43001019
++:107E3000ACC300388E4A0010ACCA00548E42001405
++:107E4000ACC2003C8E5F0018AF3F00048E50001C97
++:107E5000ACD0002090C40000309800FF130D024AFF
++:107E6000000000008CC400348CD00030009030231F
++:107E700004C000F12404008C126000EE2402000310
++:107E80000A000962AF8200682419000514B900666F
++:107E90003C0580003C0808008D085BE88F86FD9C4F
++:107EA000ACA800208E4C00048F8AFDA0240720007F
++:107EB000ACCC001C924B000824120008A14B001906
++:107EC0008F82005890430009A14300188F85005805
++:107ED00090BF000A33E400FF1092001028890009C7
++:107EE000152000BA240E0002240D0020108D000B76
++:107EF000340780002898002117000008240740005C
++:107F000024100040109000053C0700012419008057
++:107F1000109900023C070002240740008CC20018A0
++:107F20003C03FF00004350240147F825ACDF001854
++:107F300090B2000BA0D200278F8300589464000CED
++:107F4000108001FE000000009467000C3C1F8000C0
++:107F50002405FFBFA4C7005C9063000E2407000443
++:107F6000A0C300088F820058904A000FA0CA0009E1
++:107F70008F8900588D3200108FE400740244C823AA
++:107F8000ACD900588D300014ACD0002C95380018B6
++:107F9000330DFFFFACCD00409531001A322FFFFFAB
++:107FA000ACCF00448D2E001CACCE00489128000EB2
++:107FB000A0C8000890CC000801855824126001B6C2
++:107FC000A0CB00088F9200580A000962AF870068B2
++:107FD0002406000614A600143C0E80003C0F080086
++:107FE0008DEF5BE88F85FD98ADCF00208E4900189E
++:107FF0008F86FD9C8F8BFDA0ACA900008CC800383B
++:1080000024040005ACA800048CCC003C1260008164
++:10801000AD6C00000A000962AF84006824110007FB
++:1080200010B1004B240400063C05080024A55BE8C1
++:108030000E000881240400818F9200580013102B39
++:108040000A000962AF820068241F002314BFFFF6F4
++:108050003C0C80003C0508008CA55BE88F8BFDA0E4
++:10806000AD8500208F91FD9C8E4600042564002084
++:1080700026450014AE260028240600030E000F81BA
++:10808000257000308F87005802002021240600034D
++:108090000E000F8124E500083C04080024845BE8FE
++:1080A0000E0008AA0000000092230000240A0050DD
++:1080B000306200FF544AFFE18F9200580E000F6CAF
++:1080C000000000000A000A6A8F920058240800335A
++:1080D00014A800323C0380003C1108008E315BE89C
++:1080E0008F8FFDA0AC7100208E420008240D002867
++:1080F0008F89FD9CADE200308E4A000C24060009F9
++:10810000ADEA00348E5F0010ADFF00388E440014DD
++:10811000ADE400208E590018ADF900248E58001CE3
++:10812000ADF80028A1ED00118E4E00041260003160
++:10813000AD2E00288F9200580A000962AF860068B1
++:10814000240D002214ADFFB8000000002404000735
++:108150003C1008008E105BE83C188000AF10002037
++:108160005660FEAEAF8400683C04080024845BE8DF
++:108170000E0008AA241300508F84FD9C90920000EA
++:10818000325900FF1333014B000000008F9200585A
++:10819000000020210A000962AF8400683C05080045
++:1081A00024A55BE80E000858240400810A000A6A2E
++:1081B0008F92005802D498213265FFFF0E000852BA
++:1081C000240400840A0009628F920058108EFF5325
++:1081D000240704002887000310E00179241100041B
++:1081E000240F0001548FFF4D240740000A000A228B
++:1081F000240701003C05080024A55BE80E0008A444
++:10820000240400828F920058000030210A00096285
++:10821000AF8600683C04080024845BE88CC2003808
++:108220000E0008AA8CC3003C8F9200580A000AC0B6
++:1082300000002021240400823C05080024A55BE8FE
++:108240000E0008A4000000008F92005800001021CA
++:108250000A000962AF8200688E5000048F91FD9C75
++:108260003C078000ACF00020922C00050200282181
++:10827000318B0002156001562404008A8F92FDA004
++:108280002404008D9245001B30A6002014C001502C
++:1082900002002821922E00092408001231C900FF93
++:1082A0001128014B240400810E00087202002021D5
++:1082B0009258001B240F000402002021370D0042B9
++:1082C000A24D001B0E00087CA22F00253C0580005B
++:1082D0008CA401780480FFFE34B90140241F000201
++:1082E000AF300000A33F00048F9200583C101000F4
++:1082F000ACB001780A000A6B0013102B8E500004FA
++:108300008F91FD9C3C038000AC700020922A0005F8
++:108310000200282131420002144000172404008A80
++:10832000922C00092412000402002821318B00FF46
++:1083300011720011240400810E0008720200202135
++:108340008F89FDA0240800122405FFFE912F001B39
++:108350000200202135EE0020A12E001BA2280009DA
++:108360009226000500C538240E00087CA2270005CF
++:1083700002002821000020210E0009330000000027
++:108380000A000A6A8F9200588E4C00043C07800055
++:108390003C10080026105BE8ACEC00203C01080013
++:1083A000AC2C5BE8924B0003317100041220013BBE
++:1083B0008F84FD9C24020006A0820009924F001BBE
++:1083C000240EFFC031E9003F012E4025A08800089F
++:1083D0009245000330A6000114C0013200000000E5
++:1083E0008E420008AE0200083C0208008C425BF09E
++:1083F000104001318F90FDA0000219C28F8DFD9CAD
++:10840000A603000C8E4A000C24180001240400145A
++:10841000AE0A002C8E420010AE02001C965F0016C1
++:10842000A61F003C96590014A619003EADB8000CDA
++:10843000A5B80010A5B80012A5B80014A5B800167C
++:1084400012600144A2040011925100033232000272
++:108450002E5300018F920058266200080A0009621C
++:10846000AF8200688E4400043C1980003C068008FE
++:10847000AF2400208E45000890D80000240D005045
++:10848000331100FF122D009C2407008824060009E8
++:108490000E000845000000000A000A6A8F9200588A
++:1084A0008E5000043C0980003C118008AD30002053
++:1084B0009228000024050050310400FF10850110AF
++:1084C0002407008802002021000028210E00084512
++:1084D0002406000E922D00002418FF80020028219F
++:1084E00001B8802524040004240600300E0007256E
++:1084F000A23000000A000A6A8F9200588E500004D1
++:108500008F91FDA03C028000AC500020923F001BE8
++:1085100033F900101320006C240700810200202191
++:10852000000028212406001F0E000845000000005E
++:108530000A000A6A8F9200588E44001C0E00085DE3
++:1085400000000000104000E3004048218F880058E0
++:1085500024070089012020218D05001C240600012C
++:108560000E000845000000000A000A6A8F920058B9
++:10857000964900023C10080026105BE831280004F0
++:10858000110000973C0460008E4E001C3C0F8000E0
++:10859000ADEE00203C010800AC2E5BE896470002DF
++:1085A00030E40001148000E6000000008E42000468
++:1085B000AE0200083C1008008E105BF0120000ECC8
++:1085C0003C0F80008F92FD9C241000018E4E0018FD
++:1085D0008F8DFDA08F9FFD9801CF4825AE490018D3
++:1085E000A2400005AE50000C3C0808008D085BF06E
++:1085F0008F840058A6500010000839C2A6500012FF
++:10860000A6500014A6500016A5A7000C8C8C0008DC
++:108610008F8B00588F8A0058ADAC002C8D63000CF6
++:1086200024070002ADA3001C91460010A1A6001172
++:108630008F82005890450011A3E500088F990058DB
++:1086400093380012A258004E8F910058922F0013B9
++:10865000A1AF00128F920058964E0014A5AE003CB8
++:1086600096490016A5A9003E8E480018ADA8001432
++:108670005660FD6AAF8700683C05080024A55BE8EA
++:108680000E000881000020218F9200580000382140
++:108690000A000962AF8700683C05080024A55BE872
++:1086A0000E0008A4240400828F9200580A000A4D8C
++:1086B000000038210E000F6C000000008F9200585F
++:1086C0000A000AC0000020210E00087202002021CA
++:1086D0009223001B02002021346A00100E00087C47
++:1086E000A22A001B000038210200202100002821BE
++:1086F0000A000BA52406001F9242000C305F000107
++:1087000013E0000300000000964A000EA4CA002CEB
++:10871000924B000C316300025060000600003821CB
++:108720008E470014964C0012ACC7001CA4CC001A53
++:10873000000038210A000B7F240600093C050800D0
++:1087400024A55BE80E0008A42404008B8F92005837
++:108750000A000A4D0013382B3C0C08008D8C5BE896
++:1087600024DFFFFE25930100326B007F016790211B
++:1087700002638824AD110028AE4600E0AE4000E45C
++:108780000A0009B3AE5F001CACC000543C0D0800E9
++:108790008DAD5BE83C18800C37090100ACED00287A
++:1087A0008E510014AD3100E08E4F0014AD2F00E467
++:1087B0008E4E001025C7FFFE0A0009F4AD27001CED
++:1087C0005491FDD6240740000A000A222407100015
++:1087D0000E00092D000000000A000A6A8F9200585E
++:1087E0008C83442C3C12DEAD3651BEEF3C010800B8
++:1087F000AC205BE810710062000000003C196C6264
++:1088000037387970147800082404000297850074C2
++:108810009782006C2404009200A2F82B13E0001948
++:1088200002002821240400020E00069524050200FF
++:108830003C068000ACC200203C010800AC225BE892
++:108840001040000D8F8C0058240A002824040003D7
++:10885000918B0010316300FF546A00012404000171
++:108860000E0000810000000010400004240400837A
++:108870000A000BC28F920058240400833C050800B4
++:1088800024A55BE80E000881000000008F920058CC
++:108890000013382B0A000962AF8700680A000B49F1
++:1088A000240200128E4400080E00085D0000000043
++:1088B0000A000B55AE0200083C05080024A55BE841
++:1088C0000E000858240400878F9200580A000B728B
++:1088D0000013102B240400040E000695240500301C
++:1088E0001440002A004048218F8800582407008344
++:1088F000012020218D05001C0A000BB32406000175
++:108900008F8300788F8600701066FEEE000038219D
++:108910003C07080024E75B6C000320C00087282187
++:108920008CAE000011D0005D246F000131E3000F18
++:108930005466FFFA000320C00A000B8C00003821A7
++:108940008E4400040E00085D000000000A000BC801
++:10895000AE0200083C05080024A55BE80E0008A450
++:10896000240400828F9200580A000B72000010212C
++:108970003C05080024A55BE80A000C7C2404008761
++:108980008C83442C0A000C5B3C196C628F88005865
++:108990003C0780083C0C8000240B0050240A000196
++:1089A000AD820020A0EB0000A0EA000191030004CA
++:1089B000A0E3001891040005A0E400199106000648
++:1089C0003C04080024845B6CA0E6001A91020007B6
++:1089D0003C06080024C65B68A0E2001B9105000865
++:1089E000A0E5001C911F0009A0FF001D9119000ABD
++:1089F000A0F9001E9118000BA0F8001F9112000CA6
++:108A0000A0F200209111000DA0F100219110000EA4
++:108A1000A0F00022910F000FA0EF0023910E001094
++:108A2000A0EE0024910D0011A0ED0025950C00147E
++:108A3000A4EC0028950B00168F8A00708F920078A6
++:108A4000A4EB002A95030018000A10C02545000178
++:108A5000A4E3002C8D1F001C0044C0210046C82147
++:108A600030A5000FAF3F0000AF09000010B20006B4
++:108A7000AF850070000038218D05001C01202021E9
++:108A80000A000BB32406000124AD000131A7000F3A
++:108A9000AF8700780A000CF9000038213C06080076
++:108AA00024C65B680086902100003821ACA000003D
++:108AB0000A000B8CAE4000003C0482013C036000C5
++:108AC00034820E02AC603D68AF80009803E000087D
++:108AD000AC623D6C27BDFFE8AFB000103090FFFFE7
++:108AE000001018422C620041AFBF00141440000275
++:108AF00024040080240300403C010800AC300060E6
++:108B00003C010800AC2300640E000F7500602821B2
++:108B1000244802BF2409FF8001092824001039805D
++:108B2000001030408FBF00148FB0001000A720212C
++:108B300000861821AF8300803C010800AC25005856
++:108B40003C010800AC24005C03E0000827BD0018CD
++:108B5000308300FF30C6FFFF30E400FF3C08800098
++:108B60008D0201B80440FFFE000354000144382583
++:108B70003C09600000E920253C031000AD050180A0
++:108B8000AD060184AD04018803E00008AD0301B81F
++:108B90008F8500583C0A6012354800108CAC0004E8
++:108BA0003C0D600E35A60010318B00062D690001CA
++:108BB000AD0900C48CA70004ACC731808CA20008AA
++:108BC00094A40002ACC231848CA3001C0460000396
++:108BD000A784009003E00008000000008CAF00189C
++:108BE000ACCF31D08CAE001C03E00008ACCE31D449
++:108BF0008F8500588F87FF288F86FF308CAE00044A
++:108C00003C0F601235E80010ACEE00788CAD000827
++:108C1000ACED007C8CAC0010ACCC004C8CAB000CF0
++:108C2000ACCB004894CA00543C0208008C4200447B
++:108C300025490001A4C9005494C400543083FFFFA7
++:108C400010620017000000003C0208008C42004047
++:108C5000A4C200528CA30018ACE300308CA2001414
++:108C6000ACE2002C8CB90018ACF900388CB80014B8
++:108C700024050001ACF800348D0600BC50C5001975
++:108C80008D0200B48D0200B8A4E2004894E40048CC
++:108C9000A4E4004A94E800EA03E000083102FFFF80
++:108CA0003C0208008C420024A4C00054A4C200521C
++:108CB0008CA30018ACE300308CA20014ACE2002CB2
++:108CC0008CB90018ACF900388CB8001424050001E8
++:108CD000ACF800348D0600BC54C5FFEB8D0200B823
++:108CE0008D0200B4A4E2004894E40048A4E4004AE1
++:108CF00094E800EA03E000083102FFFF8F86005885
++:108D00003C0480008CC900088CC80008000929C0F8
++:108D1000000839C0AC87002090C30007306200040F
++:108D20001040003EAF85009490CB0007316A0008E8
++:108D30001140003D8F87FF2C8CCD000C8CCE001491
++:108D400001AE602B11800036000000008CC2000CC8
++:108D5000ACE200708CCB00188F85FF288F88FF3025
++:108D6000ACEB00748CCA00102402FFF8ACAA00D847
++:108D70008CC9000CAD0900608CC4001CACA400D0F0
++:108D800090E3007C0062C824A0F9007C90D8000722
++:108D9000330F000811E000040000000090ED007C9B
++:108DA00035AC0001A0EC007C90CF000731EE000153
++:108DB00011C000060000000090E3007C241800347D
++:108DC00034790002A0F9007CACB800DC90C2000746
++:108DD0003046000210C000040000000090E8007C53
++:108DE00035040004A0E4007C90ED007D3C0B600E97
++:108DF000356A001031AC003FA0EC007D8D4931D4C4
++:108E00003127000110E00002240E0001A0AE00098D
++:108E100094AF00EA03E0000831E2FFFF8F87FF2CE8
++:108E20000A000DAF8CC200140A000DB0ACE0007057
++:108E30008F8C005827BDFFD8AFB3001CAFB200180D
++:108E4000AFB00010AFBF0020AFB10014918F00157C
++:108E50003C13600E3673001031EB000FA38B009CA7
++:108E60008D8F00048D8B0008959F0012959900103E
++:108E70009584001A9598001E958E001C33EDFFFF17
++:108E8000332AFFFF3089FFFF3308FFFF31C7FFFFA1
++:108E90003C010800AC2D00243C010800AC29004432
++:108EA0003C010800AC2A0040AE683178AE67317CE6
++:108EB00091850015959100163C12601236520010F3
++:108EC00030A200FF3230FFFFAE623188AE5000B4F6
++:108ED00091830014959F0018240600010066C804C1
++:108EE00033F8FFFFAE5900B8AE5800BC918E0014A5
++:108EF000AF8F00843C08600631CD00FFAE4D00C04E
++:108F0000918A00159584000E3C07600A314900FFE4
++:108F1000AF8B00883084FFFFAE4900C835110010C8
++:108F20000E000D1034F004103C0208008C4200606A
++:108F30003C0308008C6300643C0608008CC60058A3
++:108F40003C0508008CA5005C8F8400808FBF00204A
++:108F5000AE23004CAE65319CAE030054AE4500DC40
++:108F6000AE6231A0AE6331A4AE663198AE22004845
++:108F70008FB3001CAE0200508FB10014AE4200E06F
++:108F8000AE4300E4AE4600D88FB000108FB2001898
++:108F90000A00057D27BD0028978500929783007CF5
++:108FA00027BDFFE8AFB0001000A3102BAFBF001427
++:108FB000240400058F900058104000552409000239
++:108FC0000E0006958F850080AF8200942404000374
++:108FD0001040004F240900023C0680000E00008172
++:108FE000ACC2002024070001240820001040004DDE
++:108FF00024040005978E00928F8AFF2C24090050CC
++:1090000025C50001A7850092A14900003C0D08007C
++:109010008DAD0064240380008F84FF28000D66005E
++:10902000AD4C0018A5400006954B000A8F85FF3017
++:109030002402FF8001633024A546000A915F000AE4
++:109040000000482103E2C825A159000AA0A0000899
++:10905000A140004CA08000D5961800029783009094
++:109060003C020004A49800EA960F00022418FFBFF7
++:1090700025EE2401A48E00BE8E0D0004ACAD00448C
++:109080008E0C0008ACAC0040A4A00050A4A000547A
++:109090008E0B000C240C0030AC8B00288E060010C8
++:1090A000AC860024A480003EA487004EA487005014
++:1090B000A483003CAD420074AC8800D8ACA800602A
++:1090C000A08700FC909F00D433F9007FA09900D4C2
++:1090D000909000D402187824A08F00D4914E007C88
++:1090E00035CD0001A14D007C938B009CAD480070F4
++:1090F000AC8C00DCA08B00D68F8800888F87008422
++:10910000AC8800C4AC8700C8A5400078A540007AB0
++:109110008FBF00148FB000100120102103E0000861
++:1091200027BD00188F8500940E0007258F860080CC
++:109130000A000E9F2409000227BDFFE0AFB0001017
++:109140008F900058AFB10014AFBF00188E09000413
++:109150000E00054A000921C08E0800048F84FF28F4
++:109160008F82FF30000839C03C068000ACC7002069
++:10917000948500EA904300131460001C30B1FFFF97
++:109180008F8CFF2C918B0008316A00401540000B3A
++:10919000000000008E0D0004022030218FBF001857
++:1091A0008FB100148FB00010240400220000382179
++:1091B000000D29C00A000D2F27BD00200E000098C9
++:1091C000000000008E0D0004022030218FBF001827
++:1091D0008FB100148FB00010240400220000382149
++:1091E000000D29C00A000D2F27BD00200E000090A1
++:1091F000000000008E0D0004022030218FBF0018F7
++:109200008FB100148FB00010240400220000382118
++:10921000000D29C00A000D2F27BD002027BDFFE04B
++:10922000AFB200183092FFFFAFB00010AFBF001C0C
++:10923000AFB100141240001E000080218F8600583C
++:109240008CC500002403000600053F02000514023F
++:1092500030E4000714830016304500FF2CA80006F8
++:1092600011000040000558803C0C0800258C58BCBB
++:10927000016C50218D490000012000080000000011
++:109280008F8E0098240D000111CD005024020002A1
++:10929000AF820098260900013130FFFF24C800206A
++:1092A0000212202B010030211480FFE5AF88005806
++:1092B000020010218FBF001C8FB200188FB1001464
++:1092C0008FB0001003E0000827BD00209387007EC8
++:1092D00054E00034000030210E000DE700000000D3
++:1092E0008F8600580A000EFF240200018F87009825
++:1092F0002405000210E50031240400130000282199
++:1093000000003021240700010E000D2F0000000096
++:109310000A000F008F8600588F83009824020002F5
++:109320001462FFF6240400120E000D9A00000000E3
++:109330008F85009400403021240400120E000D2F70
++:10934000000038210A000F008F8600588F83009894
++:109350002411000310710029241F0002107FFFCE8A
++:1093600026090001240400100000282100003021FB
++:109370000A000F1D240700018F91009824060002A7
++:109380001626FFF9240400100E000E410000000014
++:10939000144000238F9800588F8600580A000EFF53
++:1093A00024020003240400140E000D2F00002821C5
++:1093B0008F8600580A000EFF240200020E000EA93C
++:1093C000000000000A000F008F8600580E000D3FBD
++:1093D00000000000241900022404001400002821C9
++:1093E0000000302100003821AF9900980E000D2FA9
++:1093F000000000000A000F008F8600580E000D5775
++:10940000000000008F8500942419000200403021E4
++:1094100024040010000038210A000F56AF9900986C
++:109420000040382124040010970F0002000028217A
++:109430000E000D2F31E6FFFF8F8600580A000F0047
++:10944000AF9100988F84FF2C3C077FFF34E6FFFF2D
++:109450008C8500182402000100A61824AC83001893
++:1094600003E00008A08200053084FFFF30A5FFFF65
++:109470001080000700001821308200011040000217
++:1094800000042042006518211480FFFB00052840DD
++:1094900003E000080060102110C000070000000079
++:1094A0008CA2000024C6FFFF24A50004AC820000AB
++:1094B00014C0FFFB2484000403E000080000000047
++:1094C00010A0000824A3FFFFAC86000000000000ED
++:1094D000000000002402FFFF2463FFFF1462FFFA74
++:1094E0002484000403E0000800000000000411C010
++:1094F00003E000082442024027BDFFE8AFB000109F
++:1095000000808021AFBF00140E000F9600A0202124
++:1095100000504821240AFF808FBF00148FB0001034
++:10952000012A30243127007F3C08800A3C042100B6
++:1095300000E8102100C428253C03800027BD001846
++:10954000AC650024AF820038AC400000AC6500245C
++:1095500003E00008AC4000403C0D08008DAD005811
++:1095600000056180240AFF8001A45821016C482174
++:10957000012A30243127007F3C08800C3C04210064
++:1095800000E8102100C428253C038000AC650028B9
++:10959000AF82003403E00008AC40002430A5FFFF98
++:1095A0003C0680008CC201B80440FFFE3C086015F8
++:1095B00000A838253C031000ACC40180ACC0018475
++:1095C000ACC7018803E00008ACC301B83C0D08003B
++:1095D0008DAD005800056180240AFF8001A4582148
++:1095E000016C4021010A4824000931403107007F05
++:1095F00000C728253C04200000A418253C02800058
++:10960000AC43083003E00008AF80003427BDFFE81A
++:10961000AFB0001000808021AFBF00140E000F9685
++:1096200000A0202100504821240BFF80012B502452
++:10963000000A39403128007F3C0620008FBF00140B
++:109640008FB0001000E8282534C2000100A21825C0
++:109650003C04800027BD0018AC83083003E00008FC
++:10966000AF8000383C0580088CA700603C0680086D
++:109670000087102B144000112C8340008CA8006040
++:109680002D0340001060000F240340008CC90060CF
++:109690000089282B14A00002008018218CC30060D0
++:1096A00000035A42000B30803C0A0800254A59202A
++:1096B00000CA202103E000088C8200001460FFF340
++:1096C0002403400000035A42000B30803C0A08008B
++:1096D000254A592000CA202103E000088C8200009E
++:1096E0003C05800890A60008938400AB24C20001CA
++:1096F000304200FF3043007F1064000C0002382726
++:10970000A0A200083C0480008C85017804A0FFFE24
++:109710008F8A00A0240900023C081000AC8A014096
++:10972000A089014403E00008AC8801780A00101BFE
++:1097300030E2008027BDFFD8AFB200188F9200A49E
++:10974000AFBF0020AFB3001CAFB00010AFB100142A
++:109750008F9300348E5900283C1000803C0EFFEFA0
++:10976000AE7900008E580024A260000A35CDFFFFBC
++:10977000AE7800049251002C3C0BFF9F356AFFFF2E
++:10978000A271000C8E6F000C3C080040A271000B0F
++:1097900001F06025018D4824012A382400E8302595
++:1097A000AE66000C8E450004AE6000183C0400FF5D
++:1097B000AE6500148E43002C3482FFFFA6600008C3
++:1097C0000062F824AE7F00108E5900088F9000A030
++:1097D000964E0012AE7900208E51000C31D83FFF1A
++:1097E00000187980AE7100248E4D001401F06021C4
++:1097F00031CB0001AE6D00288E4A0018000C41C22A
++:10980000000B4B80AE6A002C8E46001C01093821EB
++:10981000A667001CAE660030964500028E4400200C
++:10982000A665001EAE64003492430033306200042B
++:1098300054400006924700003C0280083443010077
++:109840008C7F00D0AE7F0030924700008F860038BA
++:10985000A0C700309245003330A4000250800007BA
++:10986000925100018F880038240BFF80910A00304C
++:10987000014B4825A1090030925100018F9000381A
++:10988000240CFFBF2404FFDFA21100318F8D0038AC
++:109890003C1880083711008091AF003C31EE007F0A
++:1098A000A1AE003C8F890038912B003C016C502404
++:1098B000A12A003C8F9F00388E68001493E6003C7C
++:1098C0002D0700010007114000C4282400A218251C
++:1098D000A3E3003C8F87003896590012A4F90032A8
++:1098E0008E450004922E007C30B0000300107823D7
++:1098F00031ED000300AD102131CC000215800002D3
++:1099000024460034244600303C0280083443008062
++:10991000907F007C00BFC824333800041700000289
++:1099200024C2000400C010218F98003824190002BE
++:10993000ACE20034A3190000924F003F8F8E003834
++:109940003C0C8008358B0080A1CF00018F9100383E
++:10995000924D003F8E440004A62D0002956A005CE3
++:109960000E000FF43150FFFF00024B800209382532
++:109970003C08420000E82825AE2500048E4400384B
++:109980008F850038ACA400188E460034ACA6001CAD
++:10999000ACA0000CACA00010A4A00014A4A0001661
++:1099A000A4A00020A4A00022ACA000248E62001479
++:1099B00050400001240200018FBF00208FB3001C23
++:1099C0008FB200188FB100148FB00010ACA2000845
++:1099D0000A00101327BD002827BDFFC83C058008DA
++:1099E00034A40080AFBF0034AFBE0030AFB7002C4E
++:1099F000AFB60028AFB50024AFB40020AFB3001C51
++:109A0000AFB20018AFB10014AFB00010948300786B
++:109A10009482007A104300512405FFFF0080F0215A
++:109A20000A0011230080B821108B004D8FBF003435
++:109A30008F8600A03C1808008F18005C2411FF805E
++:109A40003C1680000306782101F18024AED0002C62
++:109A500096EE007A31EC007F3C0D800E31CB7FFF1B
++:109A6000018D5021000B4840012AA82196A4000036
++:109A70003C0808008D0800582405FF8030953FFF02
++:109A800001061821001539800067C8210325F82434
++:109A90003C02010003E290253338007F3C11800C2A
++:109AA000AED20028031190219250000D320F000415
++:109AB00011E0003702E0982196E3007A96E8007AF8
++:109AC00096E5007A2404800031077FFF24E300013B
++:109AD00030627FFF00A4F82403E2C825A6F9007ACB
++:109AE00096E6007A3C1408008E94006030D67FFF22
++:109AF00012D400C1000000008E5800188F8400A00E
++:109B000002A028212713FFFF0E000FCEAE53002C1A
++:109B100097D5007897D4007A12950010000028217C
++:109B20003C098008352401003C0A8008914800085F
++:109B3000908700D53114007F30E400FF0284302B81
++:109B400014C0FFB9268B0001938E00AB268C000158
++:109B5000008E682115ACFFB78F8600A08FBF003440
++:109B60008FBE00308FB7002C8FB600288FB5002431
++:109B70008FB400208FB3001C8FB200188FB1001477
++:109B80008FB0001000A0102103E0000827BD0038AE
++:109B900000C020210E000F99028028218E4B00105A
++:109BA0008E4C00308F84003824090002016C502351
++:109BB000AE4A0010A089000096E3005C8E4400309D
++:109BC0008F9100380E000FF43070FFFF00024380C9
++:109BD000020838253C02420000E22825AE25000498
++:109BE0008E5F00048F8A00388E590000240B000815
++:109BF000AD5F001CAD590018AD40000CAD40001029
++:109C00009246000A240400052408C00030D000FF5A
++:109C1000A550001496580008A55800169251000A45
++:109C20003C188008322F00FFA54F0020964E0008F8
++:109C300037110100A54E0022AD400024924D000BCB
++:109C400031AC00FFA54C0002A14B00018E49003051
++:109C50008F830038240BFFBFAC690008A06400307C
++:109C60008F9000382403FFDF9607003200E8282495
++:109C700000B51025A6020032921F003233F9003FD2
++:109C800037260040A20600328F8C0038AD800034A9
++:109C90008E2F00D0AD8F0038918E003C3C0F7FFF9F
++:109CA00031CD007FA18D003C8F84003835EEFFFF61
++:109CB000908A003C014B4824A089003C8F850038E5
++:109CC00090A8003C01033824A0A7003C8E42003439
++:109CD0008F9100383C038008AE2200408E59002C42
++:109CE0008E5F0030033F3023AE26004492300048A0
++:109CF0003218007FA23800488F8800388E4D00301F
++:109D00008D0C004801AE582401965024014B482583
++:109D1000AD0900489244000AA104004C964700088F
++:109D20008F850038A4A7004E8E5000308E4400303E
++:109D30000E0003818C65006092F9007C0002F940FE
++:109D4000004028210002110003E2302133360002D6
++:109D500012C00003020680210005B0800216802197
++:109D6000926D007C31B30004126000020005708027
++:109D7000020E80218E4B00308F8800382405800031
++:109D8000316A0003000A4823312400030204182129
++:109D9000AD03003496E4007A96F0007A96F1007AEA
++:109DA00032027FFF2447000130FF7FFF0225C824D5
++:109DB000033F3025A6E6007A96F8007A3C120800A8
++:109DC0008E520060330F7FFF11F200180000000078
++:109DD0008F8400A00E000FCE02A028218F8400A047
++:109DE0000E000FDE028028210E001013000000007C
++:109DF0000A00111F0000000096F1007A022480245E
++:109E0000A6F0007A92EF007A92EB007A31EE00FF32
++:109E1000000E69C2000D6027000C51C03169007F3F
++:109E2000012A20250A001119A2E4007A96E6007A98
++:109E300000C5C024A6F8007A92EF007A92F3007A67
++:109E400031F200FF001271C2000E6827000DB1C090
++:109E5000326C007F01962825A2E5007A0A0011D015
++:109E60008F8400A03C0380003084FFFF30A5FFFFFB
++:109E7000AC640018AC65001C03E000088C620014A0
++:109E800027BDFFA03C068008AFBF005CAFBE0058F6
++:109E9000AFB70054AFB60050AFB5004CAFB40048F8
++:109EA000AFB30044AFB20040AFB1003CAFB0003838
++:109EB00034C80100910500D590C700083084FFFF29
++:109EC00030A500FF30E2007F0045182AAFA4001043
++:109ED000A7A00018A7A0002610600055AFA000148E
++:109EE00090CA00083149007F00A9302324D3FFFF26
++:109EF0000013802B8FB400100014902B02128824C2
++:109F0000522000888FB300143C03800894790052DB
++:109F1000947E00508FB60010033EC0230018BC0092
++:109F2000001714030016FC0002C2A82A16A00002A3
++:109F3000001F2C030040282100133C0000072403CD
++:109F400000A4102A5440000100A020212885000907
++:109F500014A000020080A021241400083C0C8008FA
++:109F60008D860048001459808D88004C3C03800089
++:109F70003169FFFF3C0A0010012A202534710400DA
++:109F8000AC660038AF9100A4AC68003CAC64003013
++:109F900000000000000000000000000000000000C1
++:109FA00000000000000000000000000000000000B1
++:109FB0008C6E000031CD002011A0FFFD0014782A26
++:109FC00001F01024104000390000A8213C16800840
++:109FD00092D700083C1280008E44010032F6007FC8
++:109FE0000E000F9902C028218E3900108E44010006
++:109FF0000000902133373FFF0E000FB102E028210F
++:10A00000923800003302003F2C500008520000102C
++:10A0100000008821000210803C030800246358E4FB
++:10A020000043F8218FFE000003C00008000000007C
++:10A0300090CF0008938C00AB31EE007F00AE682318
++:10A04000018D58210A0012172573FFFF0000882197
++:10A050003C1E80008FC401000E000FCE02E02821BC
++:10A060008FC401000E000FDE02C028211220000F55
++:10A070000013802B8F8B00A426A400010004AC00E9
++:10A08000027298230015AC032578004002B4B02A70
++:10A090000013802B241700010300882102D0102414
++:10A0A000AF9800A41440FFC9AFB700143C07800864
++:10A0B00094E200508FAE00103C05800002A288217F
++:10A0C0003C060020A4F10050ACA6003094F40050EF
++:10A0D00094EF005201D51823306CFFFF11F4001EDD
++:10A0E000AFAC00108CEF004C001561808CF500487F
++:10A0F00001EC28210000202100AC582B02A4C02133
++:10A10000030BB021ACE5004CACF600488FB4001056
++:10A110000014902B021288241620FF7C3C03800838
++:10A120008FB300148FBF005C8FBE00583A620001ED
++:10A130008FB700548FB600508FB5004C8FB40048D5
++:10A140008FB300448FB200408FB1003C8FB0003815
++:10A1500003E0000827BD006094FE00548CF2004428
++:10A1600033C9FFFE0009C8C00259F821ACBF003C4A
++:10A170008CE800448CAD003C010D50231940003B9D
++:10A18000000000008CF7004026E20001ACA200387D
++:10A190003C05005034A700103C038000AC67003041
++:10A1A00000000000000000000000000000000000AF
++:10A1B000000000000000000000000000000000009F
++:10A1C0008C7800003316002012C0FFFD3C1180087F
++:10A1D000962200543C1580003C068008304E000159
++:10A1E000000E18C0007578218DEC04003C070800B3
++:10A1F0008CE700443C040020ACCC00488DF40404FF
++:10A20000240B0001ACD4004C10EB0260AEA4003073
++:10A21000963900523C0508008CA5004000B99021F9
++:10A22000A6320052963F005427ED0001A62D00549F
++:10A230009626005430C4FFFF5487FF2F8FB40010C0
++:10A2400030A5FFFF0E0011F4A62000543C070800C3
++:10A250008CE70024963E00520047B82303D74823DA
++:10A26000A62900520A0012198FB400108CE2004097
++:10A270000A0012BE00000000922400012407000121
++:10A280003085007F14A7001C97AD00268E2B00148C
++:10A29000240CC000316A3FFF01AC48243C06080092
++:10A2A0008CC60060012A402531043FFF0086882BC0
++:10A2B00012200011A7A800263C0508008CA5005814
++:10A2C0008F9100A0000439802402FF8000B1182182
++:10A2D0000067F82103E2F02433F8007F3C1280008D
++:10A2E0003C19800EAE5E002C0319702191D0000D38
++:10A2F000360F0004A1CF000D0E001028241200011B
++:10A30000241100013C1E80008FC401000E000FCEFE
++:10A3100002E028218FC401000E000FDE02C02821B8
++:10A320001620FF558F8B00A40A0012860013802B85
++:10A330008F8600A490C80001310400201080019194
++:10A34000241000013C048008348B0080916A007C5A
++:10A350008F9E0034AFA0002C314900011120000F66
++:10A36000AFB000288CCD00148C8E006001AE602B45
++:10A370001580000201A038218C8700603C188008FD
++:10A38000370300808C70007000F0782B15E000021D
++:10A3900000E020218C640070AFA4002C3C028008F7
++:10A3A000344500808CD200148CBF0070025FC82B33
++:10A3B00017200002024020218CA400708FA7002CDF
++:10A3C0000087182310600003AFA3003024050002AB
++:10A3D000AFA500288FA400280264882B162000BA9D
++:10A3E000000018218CD000388FCE000C3C0F00806C
++:10A3F000AFD000008CCD00343C0CFF9F01CF58251E
++:10A40000AFCD000490CA003F3586FFFF01662024CF
++:10A410003C0900203C08FFEFA3CA000B0089382547
++:10A420003511FFFF00F118243C0500088F8700A4B8
++:10A430000065C825AFD9000C8CE20014AFC000182D
++:10A440008FA60030AFC200148CF800188FB0002C1B
++:10A450003C1FFFFBAFD8001C8CEF000837F2FFFF5A
++:10A4600003326824AFCF00248CEC000C020670216C
++:10A47000AFCD000CA7C00038A7C0003AAFCE002C6B
++:10A48000AFCC0020AFC000288CEA00148FAB002CAA
++:10A49000014B48230126402311000011AFC80010D2
++:10A4A00090EB003D8FC900048FC80000000B5100E5
++:10A4B000012A28210000102100AA882B010218215E
++:10A4C0000071F821AFC50004AFDF000090F2003D3D
++:10A4D000A3D2000A8F9900A497380006A7D80008D5
++:10A4E0008F910038240800023C038008A228000055
++:10A4F0003465008094BF005C8FA4002C33F0FFFF14
++:10A500000E000FF48F9200380002CB808F8500A4DC
++:10A51000021978253C18420001F87025AE4E00045F
++:10A520008F8400388CAD0038AC8D00188CAC0034B2
++:10A53000AC8C001CAC80000CAC800010A48000141B
++:10A54000A4800016A4800020A4800022AC800024F7
++:10A5500090A6003F8FA7002CA486000250E0019235
++:10A56000240700018FA200305040000290A2003D5D
++:10A5700090A2003E244A0001A08A00018F84003886
++:10A580008FA9002CAC8900083C128008364D008051
++:10A5900091AC007C3186000214C000022407003414
++:10A5A000240700308F8500A43C198008373F0080C5
++:10A5B00090B0000093F9007C240E0004A0900030BD
++:10A5C0008F8F00A48FB8002C8F8D003891F200017E
++:10A5D0003304000301C46023A1B200318F8E003820
++:10A5E0008F8600A42402C00095CA003294C90012CC
++:10A5F0008FAB002C0142402431233FFF010388250B
++:10A60000A5D1003291D000323185000300EBF82152
++:10A610003218003F370F0040A1CF00328FA4002C2A
++:10A6200003E5382133280004108000028F850038AC
++:10A6300000E838213C0A8008ACA700343549010005
++:10A640008D2800D08FA3002C2419FFBFACA80038A0
++:10A6500090B1003C2C640001240FFFDF3227007F03
++:10A66000A0A7003C8F98003800049140931F003C45
++:10A6700003F98024A310003C8F8C0038918E003C9D
++:10A6800001CF682401B23025A186003C8F8900A447
++:10A690008F8800388D2B0020AD0B00408D220024C8
++:10A6A000AD0200448D2A0028AD0A00488D23002CFD
++:10A6B0000E001013AD03004C8FB1002824070002D8
++:10A6C000122700118FA300280003282B00058023E8
++:10A6D0000270982400608021006090210A00126FAF
++:10A6E0000010882B962900128F8400A00000902172
++:10A6F0003125FFFFA7A900180E000FC22411000189
++:10A700000A00131D3C1E80003C0B80003C12800898
++:10A710008D640100924900088F92FF340E000F995A
++:10A720003125007F8F9900388FA700288FA4003033
++:10A73000A3270000965F005C33F0FFFF0E000FF4CC
++:10A740008F91003800026B80020D80253C0842008A
++:10A750008F8D00A402085025AE2A00048DA5003874
++:10A760008F8A003800007821000F1100AD450018D5
++:10A770008DB800343C047FFF3488FFFFAD58001CC7
++:10A7800091A6003E8D4C001C8D4900180006190052
++:10A79000000677020183C821004E58250323882B29
++:10A7A000012B382100F1F821AD59001CAD5F0018D4
++:10A7B000AD40000CAD40001091B0003E8FA40030C1
++:10A7C00024090005A550001495A500042419C00013
++:10A7D00000884024A545001691B8003EA5580020E9
++:10A7E00095AF0004A54F0022AD40002491AE003F7C
++:10A7F000A54E000291A6003E91AC003D01861023BB
++:10A80000244B0001A14B00018F9100388FA3003031
++:10A810003C028008344B0100AE230008A22900301E
++:10A820008F8C00388F8700A4959F003294F000121F
++:10A830002407FFBF033FC02432053FFF03057825EF
++:10A84000A58F0032918E00322418FFDF31CD003FFA
++:10A8500035A60040A18600328F910038240DFFFFFD
++:10A86000240CFF80AE2000348D6A00D0AE2A003860
++:10A870009223003C3069007FA229003C8F90003871
++:10A880003C0380009219003C0327F824A21F003CDF
++:10A890008F8E003891C5003C00B87824A1CF003CD1
++:10A8A0008F8A00383C0E8008AD4D00408FA6002CEA
++:10A8B000AD46004491420048004C5825A14B004849
++:10A8C0008F9000388F9900A48E09004801238824B6
++:10A8D00002283825AE070048933F003EA21F004CD7
++:10A8E0008F9800A48F8F003897050004A5E5004ECF
++:10A8F0000E0003818DC500609246007C8FAC003055
++:10A9000000026940000291000040282130CB000283
++:10A9100001B21021156000AA018230213C0E80088E
++:10A9200035C20080904C007C31830004106000032D
++:10A930008FB900300005788000CF3021241F00043B
++:10A940008F910038332D000303ED8023320800037C
++:10A9500000C85021AE2A00343C188000A7C500383A
++:10A960003C0680088F04010090DE00080E000FDE18
++:10A9700033C5007F0E001013000000000A00140D04
++:10A980008FA300288F9800348CC90038241F00033F
++:10A99000A7000008AF0900008CC50034A300000A1E
++:10A9A0008F9900A4AF0500043C080080932D003F60
++:10A9B000A31F000C8F0A000C3C02FF9FA30D000B8D
++:10A9C0000148F0253451FFFF3C12FFEF8F9900A49E
++:10A9D00003D170243646FFFF01C61824AF03000CD4
++:10A9E0008F2C0014972900128F8400A0AF0C001048
++:10A9F0008F2F0014AF000018AF000020AF0F00141D
++:10AA0000AF0000248F270018312F3FFF000F59801F
++:10AA1000AF0700288F2500080164F821312D0001BF
++:10AA2000AF0500308F31000C8F920038001F51C2EB
++:10AA3000000D438001481021241E00023C068008BE
++:10AA4000A702001CA7000034AF11002CA25E00007A
++:10AA500034D20080964E005C8F9900383C0342004F
++:10AA600031CCFFFF01833825AF2700048F8B00A472
++:10AA7000240500012402C0008D640038240700343E
++:10AA8000AF2400188D690034AF29001CAF20000CE2
++:10AA9000AF200010A7200014A7200016A720002038
++:10AAA000A7200022AF200024A7300002A325000128
++:10AAB0008F8800388F9F00A4AD10000893ED000030
++:10AAC000A10D00308F8A00A48F98003891510001A9
++:10AAD000A31100318F8B0038957E003203C27024A1
++:10AAE00001CF6025A56C0032916300323064003FD5
++:10AAF000A16400329249007C3125000214A00002BA
++:10AB00008F840038240700303C198008AC8700345B
++:10AB1000373201008E5F00D0240AFFBF020090216F
++:10AB2000AC9F0038908D003C31A8007FA088003C8D
++:10AB30008F9E003893C2003C004A8824A3D1003C79
++:10AB40008F8300380010882B9066003C34CE0020A4
++:10AB5000A06E003C8F8400A48F9800388C8C00205D
++:10AB6000AF0C00408C8F0024AF0F00448C8700286E
++:10AB7000AF0700488C8B002CAF0B004C0E0010135D
++:10AB80003C1E80000A0012700000000094C80052B1
++:10AB90003C0A08008D4A002401488821A4D10052B3
++:10ABA0000A0012198FB40010A08700018F840038AA
++:10ABB000240B0001AC8B00080A0013BE3C12800875
++:10ABC000000520800A0014A200C4302127BDFFE048
++:10ABD0003C0D8008AFB20018AFB00010AFBF001C32
++:10ABE000AFB1001435B200808E4C001835A80100BA
++:10ABF000964B000695A70050910900FC000C5602E8
++:10AC0000016728233143007F312600FF240200031F
++:10AC1000AF8300A8AF8400A010C2001B30B0FFFFBC
++:10AC2000910600FC2412000530C200FF10520033D0
++:10AC300000000000160000098FBF001C8FB2001832
++:10AC40008FB100148FB00010240D0C003C0C80005C
++:10AC500027BD002003E00008AD8D00240E0011FB8D
++:10AC6000020020218FBF001C8FB200188FB100148A
++:10AC70008FB00010240D0C003C0C800027BD00207C
++:10AC800003E00008AD8D0024965800789651007AB4
++:10AC9000924E007D0238782631E8FFFF31C400C0B3
++:10ACA000148000092D11000116000037000000007B
++:10ACB0005620FFE28FBF001C0E0010D100000000E4
++:10ACC0000A00156A8FBF001C1620FFDA0000000082
++:10ACD0000E0010D1000000001440FFD88FBF001CF0
++:10ACE0001600002200000000925F007D33E2003F6A
++:10ACF000A242007D0A00156A8FBF001C950900EA78
++:10AD00008F86008000802821240400050E0007257E
++:10AD10003130FFFF978300923C0480002465FFFFE1
++:10AD2000A78500928C8A01B80540FFFE0000000054
++:10AD3000AC8001808FBF001CAC9001848FB20018E2
++:10AD40008FB100148FB000103C0760133C0B100053
++:10AD5000240D0C003C0C800027BD0020AC8701882E
++:10AD6000AC8B01B803E00008AD8D00240E0011FB90
++:10AD7000020020215040FFB18FBF001C925F007D78
++:10AD80000A00159733E2003F0E0011FB020020215C
++:10AD90001440FFAA8FBF001C122000070000000013
++:10ADA0009259007D3330003F36020040A242007DC0
++:10ADB0000A00156A8FBF001C0E0010D100000000B1
++:10ADC0005040FF9E8FBF001C9259007D3330003FE2
++:10ADD0000A0015C636020040000000000000001BFB
++:10ADE0000000000F0000000A00000008000000063C
++:10ADF0000000000500000005000000040000000441
++:10AE00000000000300000003000000030000000336
++:10AE10000000000300000002000000020000000229
++:10AE2000000000020000000200000002000000021A
++:10AE3000000000020000000200000002000000020A
++:10AE400000000002000000020000000200000002FA
++:10AE50000000000100000001000000018008010066
++:10AE6000800800808008000000000C000000308096
++:10AE7000080011D00800127C08001294080012A8E3
++:10AE8000080012BC080011D0080011D0080012F010
++:10AE90000800132C080013400800138808001A8CBF
++:10AEA00008001A8C08001AC408001AC408001AD82E
++:10AEB00008001AA808001D0008001CCC08001D5836
++:10AEC00008001D5808001DE008001D108008024001
++:10AED000080027340800256C0800275C080027F4C8
++:10AEE0000800293C0800298808002AAC080029B479
++:10AEF00008002A38080025DC08002EDC08002EA4F3
++:10AF000008002588080025880800258808002B20CF
++:10AF100008002B20080025880800258808002DD06F
++:10AF2000080025880800258808002588080025884D
++:10AF300008002E0C080025880800258808002588B0
++:10AF4000080025880800258808002588080025882D
++:10AF5000080025880800258808002588080025881D
++:10AF6000080025880800258808002588080029A8E9
++:10AF7000080025880800258808002E680800258814
++:10AF800008002588080025880800258808002588ED
++:10AF900008002588080025880800258808002588DD
++:10AFA00008002588080025880800258808002588CD
++:10AFB00008002588080025880800258808002588BD
++:10AFC00008002CF4080025880800258808002C6853
++:10AFD00008002BC408003CE408003CB808003C848E
++:10AFE00008003C5808003C3808003BEC8008010091
++:10AFF00080080080800800008008008008004C6401
++:10B0000008004C9C08004BE408004C6408004C64A9
++:10B01000080049B808004C64080050500A000C842D
++:10B0200000000000000000000000000D7278703683
++:10B030002E322E31620000000602010300000000E3
++:10B0400000000001000000000000000000000000FF
++:10B0500000000000000000000000000000000000F0
++:10B0600000000000000000000000000000000000E0
++:10B0700000000000000000000000000000000000D0
++:10B0800000000000000000000000000000000000C0
++:10B0900000000000000000000000000000000000B0
++:10B0A00000000000000000000000000000000000A0
++:10B0B0000000000000000000000000000000000090
++:10B0C0000000000000000000000000000000000080
++:10B0D0000000000000000000000000000000000070
++:10B0E0000000000000000000000000000000000060
++:10B0F0000000000000000000000000000000000050
++:10B10000000000000000000000000000000000003F
++:10B11000000000000000000000000000000000002F
++:10B12000000000000000000000000000000000001F
++:10B13000000000000000000000000000000000000F
++:10B1400000000000000000000000000000000000FF
++:10B1500000000000000000000000000000000000EF
++:10B1600000000000000000000000000000000000DF
++:10B1700000000000000000000000000000000000CF
++:10B1800000000000000000000000000000000000BF
++:10B1900000000000000000000000000000000000AF
++:10B1A000000000000000000000000000000000009F
++:10B1B000000000000000000000000000000000008F
++:10B1C000000000000000000000000000000000007F
++:10B1D000000000000000000000000000000000006F
++:10B1E000000000000000000000000000000000005F
++:10B1F000000000000000000000000000000000004F
++:10B20000000000000000000000000000000000003E
++:10B21000000000000000000000000000000000002E
++:10B22000000000000000000000000000000000001E
++:10B23000000000000000000000000000000000000E
++:10B2400000000000000000000000000000000000FE
++:10B2500000000000000000000000000000000000EE
++:10B2600000000000000000000000000000000000DE
++:10B2700000000000000000000000000000000000CE
++:10B2800000000000000000000000000000000000BE
++:10B2900000000000000000000000000000000000AE
++:10B2A000000000000000000000000000000000009E
++:10B2B000000000000000000000000000000000008E
++:10B2C000000000000000000000000000000000007E
++:10B2D000000000000000000000000000000000006E
++:10B2E000000000000000000000000000000000005E
++:10B2F000000000000000000000000000000000004E
++:10B30000000000000000000000000000000000003D
++:10B31000000000000000000000000000000000002D
++:10B32000000000000000000000000000000000001D
++:10B33000000000000000000000000000000000000D
++:10B3400000000000000000000000000000000000FD
++:10B3500000000000000000000000000000000000ED
++:10B3600000000000000000000000000000000000DD
++:10B3700000000000000000000000000000000000CD
++:10B3800000000000000000000000000000000000BD
++:10B3900000000000000000000000000000000000AD
++:10B3A000000000000000000000000000000000009D
++:10B3B000000000000000000000000000000000008D
++:10B3C000000000000000000000000000000000007D
++:10B3D000000000000000000000000000000000006D
++:10B3E000000000000000000000000000000000005D
++:10B3F000000000000000000000000000000000004D
++:10B40000000000000000000000000000000000003C
++:10B41000000000000000000000000000000000002C
++:10B42000000000000000000000000000000000001C
++:10B43000000000000000000000000000000000000C
++:10B4400000000000000000000000000000000000FC
++:10B4500000000000000000000000000000000000EC
++:10B4600000000000000000000000000000000000DC
++:10B4700000000000000000000000000000000000CC
++:10B4800000000000000000000000000000000000BC
++:10B4900000000000000000000000000000000000AC
++:10B4A000000000000000000000000000000000009C
++:10B4B000000000000000000000000000000000008C
++:10B4C000000000000000000000000000000000007C
++:10B4D000000000000000000000000000000000006C
++:10B4E000000000000000000000000000000000005C
++:10B4F000000000000000000000000000000000004C
++:10B50000000000000000000000000000000000003B
++:10B51000000000000000000000000000000000002B
++:10B52000000000000000000000000000000000001B
++:10B53000000000000000000000000000000000000B
++:10B5400000000000000000000000000000000000FB
++:10B5500000000000000000000000000000000000EB
++:10B5600000000000000000000000000000000000DB
++:10B5700000000000000000000000000000000000CB
++:10B5800000000000000000000000000000000000BB
++:10B5900000000000000000000000000000000000AB
++:10B5A000000000000000000000000000000000009B
++:10B5B000000000000000000000000000000000008B
++:10B5C000000000000000000000000000000000007B
++:10B5D000000000000000000000000000000000006B
++:10B5E000000000000000000000000000000000005B
++:10B5F000000000000000000000000000000000004B
++:10B60000000000000000000000000000000000003A
++:10B61000000000000000000000000000000000002A
++:10B62000000000000000000000000000000000001A
++:10B63000000000000000000000000000000000000A
++:10B6400000000000000000000000000000000000FA
++:10B6500000000000000000000000000000000000EA
++:10B6600000000000000000000000000000000000DA
++:10B6700000000000000000000000000000000000CA
++:10B6800000000000000000000000000000000000BA
++:10B6900000000000000000000000000000000000AA
++:10B6A000000000000000000000000000000000009A
++:10B6B000000000000000000000000000000000008A
++:10B6C000000000000000000000000000000000007A
++:10B6D000000000000000000000000000000000006A
++:10B6E000000000000000000000000000000000005A
++:10B6F000000000000000000000000000000000004A
++:10B700000000000000000000000000000000000039
++:10B710000000000000000000000000000000000029
++:10B720000000000000000000000000000000000019
++:10B730000000000000000000000000000000000009
++:10B7400000000000000000000000000000000000F9
++:10B7500000000000000000000000000000000000E9
++:10B7600000000000000000000000000000000000D9
++:10B7700000000000000000000000000000000000C9
++:10B7800000000000000000000000000000000000B9
++:10B7900000000000000000000000000000000000A9
++:10B7A0000000000000000000000000000000000099
++:10B7B0000000000000000000000000000000000089
++:10B7C0000000000000000000000000000000000079
++:10B7D0000000000000000000000000000000000069
++:10B7E0000000000000000000000000000000000059
++:10B7F0000000000000000000000000000000000049
++:10B800000000000000000000000000000000000038
++:10B810000000000000000000000000000000000028
++:10B820000000000000000000000000000000000018
++:10B830000000000000000000000000000000000008
++:10B8400000000000000000000000000000000000F8
++:10B8500000000000000000000000000000000000E8
++:10B8600000000000000000000000000000000000D8
++:10B8700000000000000000000000000000000000C8
++:10B8800000000000000000000000000000000000B8
++:10B8900000000000000000000000000000000000A8
++:10B8A0000000000000000000000000000000000098
++:10B8B0000000000000000000000000000000000088
++:10B8C0000000000000000000000000000000000078
++:10B8D0000000000000000000000000000000000068
++:10B8E0000000000000000000000000000000000058
++:10B8F0000000000000000000000000000000000048
++:10B900000000000000000000000000000000000037
++:10B910000000000000000000000000000000000027
++:10B920000000000000000000000000000000000017
++:10B930000000000000000000000000000000000007
++:10B9400000000000000000000000000000000000F7
++:10B9500000000000000000000000000000000000E7
++:10B9600000000000000000000000000000000000D7
++:10B9700000000000000000000000000000000000C7
++:10B9800000000000000000000000000000000000B7
++:10B9900000000000000000000000000000000000A7
++:10B9A0000000000000000000000000000000000097
++:10B9B0000000000000000000000000000000000087
++:10B9C0000000000000000000000000000000000077
++:10B9D0000000000000000000000000000000000067
++:10B9E0000000000000000000000000000000000057
++:10B9F0000000000000000000000000000000000047
++:10BA00000000000000000000000000000000000036
++:10BA10000000000000000000000000000000000026
++:10BA20000000000000000000000000000000000016
++:10BA30000000000000000000000000000000000006
++:10BA400000000000000000000000000000000000F6
++:10BA500000000000000000000000000000000000E6
++:10BA600000000000000000000000000000000000D6
++:10BA700000000000000000000000000000000000C6
++:10BA800000000000000000000000000000000000B6
++:10BA900000000000000000000000000000000000A6
++:10BAA0000000000000000000000000000000000096
++:10BAB0000000000000000000000000000000000086
++:10BAC0000000000000000000000000000000000076
++:10BAD0000000000000000000000000000000000066
++:10BAE0000000000000000000000000000000000056
++:10BAF0000000000000000000000000000000000046
++:10BB00000000000000000000000000000000000035
++:10BB10000000000000000000000000000000000025
++:10BB20000000000000000000000000000000000015
++:10BB30000000000000000000000000000000000005
++:10BB400000000000000000000000000000000000F5
++:10BB500000000000000000000000000000000000E5
++:10BB600000000000000000000000000000000000D5
++:10BB700000000000000000000000000000000000C5
++:10BB800000000000000000000000000000000000B5
++:10BB900000000000000000000000000000000000A5
++:10BBA0000000000000000000000000000000000095
++:10BBB0000000000000000000000000000000000085
++:10BBC0000000000000000000000000000000000075
++:10BBD0000000000000000000000000000000000065
++:10BBE0000000000000000000000000000000000055
++:10BBF0000000000000000000000000000000000045
++:10BC00000000000000000000000000000000000034
++:10BC10000000000000000000000000000000000024
++:10BC20000000000000000000000000000000000014
++:10BC30000000000000000000000000000000000004
++:10BC400000000000000000000000000000000000F4
++:10BC500000000000000000000000000000000000E4
++:10BC600000000000000000000000000000000000D4
++:10BC700000000000000000000000000000000000C4
++:10BC800000000000000000000000000000000000B4
++:10BC900000000000000000000000000000000000A4
++:10BCA0000000000000000000000000000000000094
++:10BCB0000000000000000000000000000000000084
++:10BCC0000000000000000000000000000000000074
++:10BCD0000000000000000000000000000000000064
++:10BCE0000000000000000000000000000000000054
++:10BCF0000000000000000000000000000000000044
++:10BD00000000000000000000000000000000000033
++:10BD10000000000000000000000000000000000023
++:10BD20000000000000000000000000000000000013
++:10BD30000000000000000000000000000000000003
++:10BD400000000000000000000000000000000000F3
++:10BD500000000000000000000000000000000000E3
++:10BD600000000000000000000000000000000000D3
++:10BD700000000000000000000000000000000000C3
++:10BD800000000000000000000000000000000000B3
++:10BD900000000000000000000000000000000000A3
++:10BDA0000000000000000000000000000000000093
++:10BDB0000000000000000000000000000000000083
++:10BDC0000000000000000000000000000000000073
++:10BDD0000000000000000000000000000000000063
++:10BDE0000000000000000000000000000000000053
++:10BDF0000000000000000000000000000000000043
++:10BE00000000000000000000000000000000000032
++:10BE10000000000000000000000000000000000022
++:10BE20000000000000000000000000000000000012
++:10BE30000000000000000000000000000000000002
++:10BE400000000000000000000000000000000000F2
++:10BE500000000000000000000000000000000000E2
++:10BE600000000000000000000000000000000000D2
++:10BE700000000000000000000000000000000000C2
++:10BE800000000000000000000000000000000000B2
++:10BE900000000000000000000000000000000000A2
++:10BEA0000000000000000000000000000000000092
++:10BEB0000000000000000000000000000000000082
++:10BEC0000000000000000000000000000000000072
++:10BED0000000000000000000000000000000000062
++:10BEE0000000000000000000000000000000000052
++:10BEF0000000000000000000000000000000000042
++:10BF00000000000000000000000000000000000031
++:10BF10000000000000000000000000000000000021
++:10BF20000000000000000000000000000000000011
++:10BF30000000000000000000000000000000000001
++:10BF400000000000000000000000000000000000F1
++:10BF500000000000000000000000000000000000E1
++:10BF600000000000000000000000000000000000D1
++:10BF700000000000000000000000000000000000C1
++:10BF800000000000000000000000000000000000B1
++:10BF900000000000000000000000000000000000A1
++:10BFA0000000000000000000000000000000000091
++:10BFB0000000000000000000000000000000000081
++:10BFC0000000000000000000000000000000000071
++:10BFD0000000000000000000000000000000000061
++:10BFE0000000000000000000000000000000000051
++:10BFF0000000000000000000000000000000000041
++:10C000000000000000000000000000000000000030
++:10C010000000000000000000000000000000000020
++:10C020000000000000000000000000000000000010
++:10C030000000000000000000000000000000000000
++:10C0400000000000000000000000000000000000F0
++:10C0500000000000000000000000000000000000E0
++:10C0600000000000000000000000000000000000D0
++:10C0700000000000000000000000000000000000C0
++:10C0800000000000000000000000000000000000B0
++:10C0900000000000000000000000000000000000A0
++:10C0A0000000000000000000000000000000000090
++:10C0B0000000000000000000000000000000000080
++:10C0C0000000000000000000000000000000000070
++:10C0D0000000000000000000000000000000000060
++:10C0E0000000000000000000000000000000000050
++:10C0F0000000000000000000000000000000000040
++:10C10000000000000000000000000000000000002F
++:10C11000000000000000000000000000000000001F
++:10C12000000000000000000000000000000000000F
++:10C1300000000000000000000000000000000000FF
++:10C1400000000000000000000000000000000000EF
++:10C1500000000000000000000000000000000000DF
++:10C1600000000000000000000000000000000000CF
++:10C1700000000000000000000000000000000000BF
++:10C1800000000000000000000000000000000000AF
++:10C19000000000000000000000000000000000009F
++:10C1A000000000000000000000000000000000008F
++:10C1B000000000000000000000000000000000007F
++:10C1C000000000000000000000000000000000006F
++:10C1D000000000000000000000000000000000005F
++:10C1E000000000000000000000000000000000004F
++:10C1F000000000000000000000000000000000003F
++:10C20000000000000000000000000000000000002E
++:10C21000000000000000000000000000000000001E
++:10C22000000000000000000000000000000000000E
++:10C2300000000000000000000000000000000000FE
++:10C2400000000000000000000000000000000000EE
++:10C2500000000000000000000000000000000000DE
++:10C2600000000000000000000000000000000000CE
++:10C2700000000000000000000000000000000000BE
++:10C2800000000000000000000000000000000000AE
++:10C29000000000000000000000000000000000009E
++:10C2A000000000000000000000000000000000008E
++:10C2B000000000000000000000000000000000007E
++:10C2C000000000000000000000000000000000006E
++:10C2D000000000000000000000000000000000005E
++:10C2E000000000000000000000000000000000004E
++:10C2F000000000000000000000000000000000003E
++:10C30000000000000000000000000000000000002D
++:10C31000000000000000000000000000000000001D
++:10C32000000000000000000000000000000000000D
++:10C3300000000000000000000000000000000000FD
++:10C3400000000000000000000000000000000000ED
++:10C3500000000000000000000000000000000000DD
++:10C3600000000000000000000000000000000000CD
++:10C3700000000000000000000000000000000000BD
++:10C3800000000000000000000000000000000000AD
++:10C39000000000000000000000000000000000009D
++:10C3A000000000000000000000000000000000008D
++:10C3B000000000000000000000000000000000007D
++:10C3C000000000000000000000000000000000006D
++:10C3D000000000000000000000000000000000005D
++:10C3E000000000000000000000000000000000004D
++:10C3F000000000000000000000000000000000003D
++:10C40000000000000000000000000000000000002C
++:10C41000000000000000000000000000000000001C
++:10C42000000000000000000000000000000000000C
++:10C4300000000000000000000000000000000000FC
++:10C4400000000000000000000000000000000000EC
++:10C4500000000000000000000000000000000000DC
++:10C4600000000000000000000000000000000000CC
++:10C4700000000000000000000000000000000000BC
++:10C4800000000000000000000000000000000000AC
++:10C49000000000000000000000000000000000009C
++:10C4A000000000000000000000000000000000008C
++:10C4B000000000000000000000000000000000007C
++:10C4C000000000000000000000000000000000006C
++:10C4D000000000000000000000000000000000005C
++:10C4E000000000000000000000000000000000004C
++:10C4F000000000000000000000000000000000003C
++:10C50000000000000000000000000000000000002B
++:10C51000000000000000000000000000000000001B
++:10C52000000000000000000000000000000000000B
++:10C5300000000000000000000000000000000000FB
++:10C5400000000000000000000000000000000000EB
++:10C5500000000000000000000000000000000000DB
++:10C5600000000000000000000000000000000000CB
++:10C5700000000000000000000000000000000000BB
++:10C5800000000000000000000000000000000000AB
++:10C59000000000000000000000000000000000009B
++:10C5A000000000000000000000000000000000008B
++:10C5B000000000000000000000000000000000007B
++:10C5C000000000000000000000000000000000006B
++:10C5D000000000000000000000000000000000005B
++:10C5E000000000000000000000000000000000004B
++:10C5F000000000000000000000000000000000003B
++:10C60000000000000000000000000000000000002A
++:10C61000000000000000000000000000000000001A
++:10C62000000000000000000000000000000000000A
++:10C6300000000000000000000000000000000000FA
++:10C6400000000000000000000000000000000000EA
++:10C6500000000000000000000000000000000000DA
++:10C6600000000000000000000000000000000000CA
++:10C6700000000000000000000000000000000000BA
++:10C6800000000000000000000000000000000000AA
++:10C69000000000000000000000000000000000009A
++:10C6A000000000000000000000000000000000008A
++:10C6B000000000000000000000000000000000007A
++:10C6C000000000000000000000000000000000006A
++:10C6D000000000000000000000000000000000005A
++:10C6E000000000000000000000000000000000004A
++:10C6F000000000000000000000000000000000003A
++:10C700000000000000000000000000000000000029
++:10C710000000000000000000000000000000000019
++:10C720000000000000000000000000000000000009
++:10C7300000000000000000000000000000000000F9
++:10C7400000000000000000000000000000000000E9
++:10C7500000000000000000000000000000000000D9
++:10C7600000000000000000000000000000000000C9
++:10C7700000000000000000000000000000000000B9
++:10C7800000000000000000000000000000000000A9
++:10C790000000000000000000000000000000000099
++:10C7A0000000000000000000000000000000000089
++:10C7B0000000000000000000000000000000000079
++:10C7C0000000000000000000000000000000000069
++:10C7D0000000000000000000000000000000000059
++:10C7E0000000000000000000000000000000000049
++:10C7F0000000000000000000000000000000000039
++:10C800000000000000000000000000000000000028
++:10C810000000000000000000000000000000000018
++:10C820000000000000000000000000000000000008
++:10C8300000000000000000000000000000000000F8
++:10C8400000000000000000000000000000000000E8
++:10C8500000000000000000000000000000000000D8
++:10C8600000000000000000000000000000000000C8
++:10C8700000000000000000000000000000000000B8
++:10C8800000000000000000000000000000000000A8
++:10C890000000000000000000000000000000000098
++:10C8A0000000000000000000000000000000000088
++:10C8B0000000000000000000000000000000000078
++:10C8C0000000000000000000000000000000000068
++:10C8D0000000000000000000000000000000000058
++:10C8E0000000000000000000000000000000000048
++:10C8F0000000000000000000000000000000000038
++:10C900000000000000000000000000000000000027
++:10C910000000000000000000000000000000000017
++:10C920000000000000000000000000000000000007
++:10C9300000000000000000000000000000000000F7
++:10C9400000000000000000000000000000000000E7
++:10C9500000000000000000000000000000000000D7
++:10C9600000000000000000000000000000000000C7
++:10C9700000000000000000000000000000000000B7
++:10C9800000000000000000000000000000000000A7
++:10C990000000000000000000000000000000000097
++:10C9A0000000000000000000000000000000000087
++:10C9B0000000000000000000000000000000000077
++:10C9C0000000000000000000000000000000000067
++:10C9D0000000000000000000000000000000000057
++:10C9E0000000000000000000000000000000000047
++:10C9F0000000000000000000000000000000000037
++:10CA00000000000000000000000000000000000026
++:10CA10000000000000000000000000000000000016
++:10CA20000000000000000000000000000000000006
++:10CA300000000000000000000000000000000000F6
++:10CA400000000000000000000000000000000000E6
++:10CA500000000000000000000000000000000000D6
++:10CA600000000000000000000000000000000000C6
++:10CA700000000000000000000000000000000000B6
++:10CA800000000000000000000000000000000000A6
++:10CA90000000000000000000000000000000000096
++:10CAA0000000000000000000000000000000000086
++:10CAB0000000000000000000000000000000000076
++:10CAC0000000000000000000000000000000000066
++:10CAD0000000000000000000000000000000000056
++:10CAE0000000000000000000000000000000000046
++:10CAF0000000000000000000000000000000000036
++:10CB00000000000000000000000000000000000025
++:10CB10000000000000000000000000000000000015
++:10CB20000000000000000000000000000000000005
++:10CB300000000000000000000000000000000000F5
++:10CB400000000000000000000000000000000000E5
++:10CB500000000000000000000000000000000000D5
++:10CB600000000000000000000000000000000000C5
++:10CB700000000000000000000000000000000000B5
++:10CB800000000000000000000000000000000000A5
++:10CB90000000000000000000000000000000000095
++:10CBA0000000000000000000000000000000000085
++:10CBB0000000000000000000000000000000000075
++:10CBC0000000000000000000000000000000000065
++:10CBD0000000000000000000000000000000000055
++:10CBE0000000000000000000000000000000000045
++:10CBF0000000000000000000000000000000000035
++:10CC00000000000000000000000000000000000024
++:10CC10000000000000000000000000000000000014
++:10CC20000000000000000000000000000000000004
++:10CC300000000000000000000000000000000000F4
++:10CC400000000000000000000000000000000000E4
++:10CC500000000000000000000000000000000000D4
++:10CC600000000000000000000000000000000000C4
++:10CC700000000000000000000000000000000000B4
++:10CC800000000000000000000000000000000000A4
++:10CC90000000000000000000000000000000000094
++:10CCA0000000000000000000000000000000000084
++:10CCB0000000000000000000000000000000000074
++:10CCC0000000000000000000000000000000000064
++:10CCD0000000000000000000000000000000000054
++:10CCE0000000000000000000000000000000000044
++:10CCF0000000000000000000000000000000000034
++:10CD00000000000000000000000000000000000023
++:10CD10000000000000000000000000000000000013
++:10CD20000000000000000000000000000000000003
++:10CD300000000000000000000000000000000000F3
++:10CD400000000000000000000000000000000000E3
++:10CD500000000000000000000000000000000000D3
++:10CD600000000000000000000000000000000000C3
++:10CD700000000000000000000000000000000000B3
++:10CD800000000000000000000000000000000000A3
++:10CD90000000000000000000000000000000000093
++:10CDA0000000000000000000000000000000000083
++:10CDB0000000000000000000000000000000000073
++:10CDC0000000000000000000000000000000000063
++:10CDD0000000000000000000000000000000000053
++:10CDE0000000000000000000000000000000000043
++:10CDF0000000000000000000000000000000000033
++:10CE00000000000000000000000000000000000022
++:10CE10000000000000000000000000000000000012
++:10CE20000000000000000000000000000000000002
++:10CE300000000000000000000000000000000000F2
++:10CE400000000000000000000000000000000000E2
++:10CE500000000000000000000000000000000000D2
++:10CE600000000000000000000000000000000000C2
++:10CE700000000000000000000000000000000000B2
++:10CE800000000000000000000000000000000000A2
++:10CE90000000000000000000000000000000000092
++:10CEA0000000000000000000000000000000000082
++:10CEB0000000000000000000000000000000000072
++:10CEC0000000000000000000000000000000000062
++:10CED0000000000000000000000000000000000052
++:10CEE0000000000000000000000000000000000042
++:10CEF0000000000000000000000000000000000032
++:10CF00000000000000000000000000000000000021
++:10CF10000000000000000000000000000000000011
++:10CF20000000000000000000000000000000000001
++:10CF300000000000000000000000000000000000F1
++:10CF400000000000000000000000000000000000E1
++:10CF500000000000000000000000000000000000D1
++:10CF600000000000000000000000000000000000C1
++:10CF700000000000000000000000000000000000B1
++:10CF800000000000000000000000000000000000A1
++:10CF90000000000000000000000000000000000091
++:10CFA0000000000000000000000000000000000081
++:10CFB0000000000000000000000000000000000071
++:10CFC0000000000000000000000000000000000061
++:10CFD0000000000000000000000000000000000051
++:10CFE0000000000000000000000000000000000041
++:10CFF0000000000000000000000000000000000031
++:10D000000000000000000000000000000000000020
++:10D010000000000000000000000000000000000010
++:10D020000000000000000000000000000000000000
++:10D0300000000000000000000000000000000000F0
++:10D0400000000000000000000000000000000000E0
++:10D0500000000000000000000000000000000000D0
++:10D0600000000000000000000000000000000000C0
++:10D0700000000000000000000000000000000000B0
++:10D0800000000000000000000000000000000000A0
++:10D090000000000000000000000000000000000090
++:10D0A0000000000000000000000000000000000080
++:10D0B0000000000000000000000000000000000070
++:10D0C0000000000000000000000000000000000060
++:10D0D0000000000000000000000000000000000050
++:10D0E0000000000000000000000000000000000040
++:10D0F0000000000000000000000000000000000030
++:10D10000000000000000000000000000000000001F
++:10D11000000000000000000000000000000000000F
++:10D1200000000000000000000000000000000000FF
++:10D1300000000000000000000000000000000000EF
++:10D1400000000000000000000000000000000000DF
++:10D1500000000000000000000000000000000000CF
++:10D1600000000000000000000000000000000000BF
++:10D1700000000000000000000000000000000000AF
++:10D18000000000000000000000000000000000009F
++:10D19000000000000000000000000000000000008F
++:10D1A000000000000000000000000000000000007F
++:10D1B000000000000000000000000000000000006F
++:10D1C000000000000000000000000000000000005F
++:10D1D000000000000000000000000000000000004F
++:10D1E000000000000000000000000000000000003F
++:10D1F000000000000000000000000000000000002F
++:10D20000000000000000000000000000000000001E
++:10D21000000000000000000000000000000000000E
++:10D2200000000000000000000000000000000000FE
++:10D2300000000000000000000000000000000000EE
++:10D2400000000000000000000000000000000000DE
++:10D2500000000000000000000000000000000000CE
++:10D2600000000000000000000000000000000000BE
++:10D2700000000000000000000000000000000000AE
++:10D28000000000000000000000000000000000009E
++:10D29000000000000000000000000000000000008E
++:10D2A000000000000000000000000000000000007E
++:10D2B000000000000000000000000000000000006E
++:10D2C000000000000000000000000000000000005E
++:10D2D000000000000000000000000000000000004E
++:10D2E000000000000000000000000000000000003E
++:10D2F000000000000000000000000000000000002E
++:10D30000000000000000000000000000000000001D
++:10D31000000000000000000000000000000000000D
++:10D3200000000000000000000000000000000000FD
++:10D3300000000000000000000000000000000000ED
++:10D3400000000000000000000000000000000000DD
++:10D3500000000000000000000000000000000000CD
++:10D3600000000000000000000000000000000000BD
++:10D3700000000000000000000000000000000000AD
++:10D38000000000000000000000000000000000009D
++:10D39000000000000000000000000000000000008D
++:10D3A000000000000000000000000000000000007D
++:10D3B000000000000000000000000000000000006D
++:10D3C000000000000000000000000000000000005D
++:10D3D000000000000000000000000000000000004D
++:10D3E000000000000000000000000000000000003D
++:10D3F000000000000000000000000000000000002D
++:10D40000000000000000000000000000000000001C
++:10D41000000000000000000000000000000000000C
++:10D4200000000000000000000000000000000000FC
++:10D4300000000000000000000000000000000000EC
++:10D4400000000000000000000000000000000000DC
++:10D4500000000000000000000000000000000000CC
++:10D4600000000000000000000000000000000000BC
++:10D4700000000000000000000000000000000000AC
++:10D48000000000000000000000000000000000009C
++:10D49000000000000000000000000000000000008C
++:10D4A000000000000000000000000000000000007C
++:10D4B000000000000000000000000000000000006C
++:10D4C000000000000000000000000000000000005C
++:10D4D000000000000000000000000000000000004C
++:10D4E000000000000000000000000000000000003C
++:10D4F000000000000000000000000000000000002C
++:10D50000000000000000000000000000000000001B
++:10D51000000000000000000000000000000000000B
++:10D5200000000000000000000000000000000000FB
++:10D5300000000000000000000000000000000000EB
++:10D5400000000000000000000000000000000000DB
++:10D5500000000000000000000000000000000000CB
++:10D5600000000000000000000000000000000000BB
++:10D5700000000000000000000000000000000000AB
++:10D58000000000000000000000000000000000009B
++:10D59000000000000000000000000000000000008B
++:10D5A000000000000000000000000000000000007B
++:10D5B000000000000000000000000000000000006B
++:10D5C000000000000000000000000000000000005B
++:10D5D000000000000000000000000000000000004B
++:10D5E000000000000000000000000000000000003B
++:10D5F000000000000000000000000000000000002B
++:10D60000000000000000000000000000000000001A
++:10D61000000000000000000000000000000000000A
++:10D6200000000000000000000000000000000000FA
++:10D6300000000000000000000000000000000000EA
++:10D6400000000000000000000000000000000000DA
++:10D6500000000000000000000000000000000000CA
++:10D6600000000000000000000000000000000000BA
++:10D6700000000000000000000000000000000000AA
++:10D68000000000000000000000000000000000009A
++:10D69000000000000000000000000000000000008A
++:10D6A000000000000000000000000000000000007A
++:10D6B000000000000000000000000000000000006A
++:10D6C000000000000000000000000000000000005A
++:10D6D000000000000000000000000000000000004A
++:10D6E000000000000000000000000000000000003A
++:10D6F000000000000000000000000000000000002A
++:10D700000000000000000000000000000000000019
++:10D710000000000000000000000000000000000009
++:10D7200000000000000000000000000000000000F9
++:10D7300000000000000000000000000000000000E9
++:10D7400000000000000000000000000000000000D9
++:10D7500000000000000000000000000000000000C9
++:10D7600000000000000000000000000000000000B9
++:10D7700000000000000000000000000000000000A9
++:10D780000000000000000000000000000000000099
++:10D790000000000000000000000000000000000089
++:10D7A0000000000000000000000000000000000079
++:10D7B0000000000000000000000000000000000069
++:10D7C0000000000000000000000000000000000059
++:10D7D0000000000000000000000000000000000049
++:10D7E0000000000000000000000000000000000039
++:10D7F0000000000000000000000000000000000029
++:10D800000000000000000000000000000000000018
++:10D810000000000000000000000000000000000008
++:10D8200000000000000000000000000000000000F8
++:10D8300000000000000000000000000000000000E8
++:10D8400000000000000000000000000000000000D8
++:10D8500000000000000000000000000000000000C8
++:10D8600000000000000000000000000000000000B8
++:10D8700000000000000000000000000000000000A8
++:10D880000000000000000000000000000000000098
++:10D890000000000000000000000000000000000088
++:10D8A0000000000000000000000000000000000078
++:10D8B0000000000000000000000000000000000068
++:10D8C0000000000000000000000000000000000058
++:10D8D0000000000000000000000000000000000048
++:10D8E0000000000000000000000000000000000038
++:10D8F0000000000000000000000000000000000028
++:10D900000000000000000000000000000000000017
++:10D910000000000000000000000000000000000007
++:10D9200000000000000000000000000000000000F7
++:10D9300000000000000000000000000000000000E7
++:10D9400000000000000000000000000000000000D7
++:10D9500000000000000000000000000000000000C7
++:10D9600000000000000000000000000000000000B7
++:10D9700000000000000000000000000000000000A7
++:10D980000000000000000000000000000000000097
++:10D990000000000000000000000000000000000087
++:10D9A0000000000000000000000000000000000077
++:10D9B0000000000000000000000000000000000067
++:10D9C0000000000000000000000000000000000057
++:10D9D0000000000000000000000000000000000047
++:10D9E0000000000000000000000000000000000037
++:10D9F0000000000000000000000000000000000027
++:10DA00000000000000000000000000000000000016
++:10DA10000000000000000000000000000000000006
++:10DA200000000000000000000000000000000000F6
++:10DA300000000000000000000000000000000000E6
++:10DA400000000000000000000000000000000000D6
++:10DA500000000000000000000000000000000000C6
++:10DA600000000000000000000000000000000000B6
++:10DA700000000000000000000000000000000000A6
++:10DA80000000000000000000000000000000000096
++:10DA90000000000000000000000000000000000086
++:10DAA0000000000000000000000000000000000076
++:10DAB0000000000000000000000000000000000066
++:10DAC0000000000000000000000000000000000056
++:10DAD0000000000000000000000000000000000046
++:10DAE0000000000000000000000000000000000036
++:10DAF0000000000000000000000000000000000026
++:10DB00000000000000000000000000000000000015
++:10DB10000000000000000000000000000000000005
++:10DB200000000000000000000000000000000000F5
++:10DB300000000000000000000000000000000000E5
++:10DB400000000000000000000000000000000000D5
++:10DB500000000000000000000000000000000000C5
++:10DB600000000000000000000000000000000000B5
++:10DB700000000000000000000000000000000000A5
++:10DB80000000000000000000000000000000000095
++:10DB90000000000000000000000000000000000085
++:10DBA0000000000000000000000000000000000075
++:10DBB0000000000000000000000000000000000065
++:10DBC0000000000000000000000000000000000055
++:10DBD0000000000000000000000000000000000045
++:10DBE0000000000000000000000000000000000035
++:10DBF0000000000000000000000000000000000025
++:10DC00000000000000000000000000000000000014
++:10DC10000000000000000000000000000000000004
++:10DC200000000000000000000000000000000000F4
++:10DC300000000000000000000000000000000000E4
++:10DC400000000000000000000000000000000000D4
++:10DC500000000000000000000000000000000000C4
++:10DC600000000000000000000000000000000000B4
++:10DC700000000000000000000000000000000000A4
++:10DC80000000000000000000000000000000000094
++:10DC90000000000000000000000000000000000084
++:10DCA0000000000000000000000000000000000074
++:10DCB0000000000000000000000000000000000064
++:10DCC0000000000000000000000000000000000054
++:10DCD0000000000000000000000000000000000044
++:10DCE0000000000000000000000000000000000034
++:10DCF0000000000000000000000000000000000024
++:10DD00000000000000000000000000000000000013
++:10DD10000000000000000000000000000000000003
++:10DD200000000000000000000000000000000000F3
++:10DD300000000000000000000000000000000000E3
++:10DD400000000000000000000000000000000000D3
++:10DD500000000000000000000000000000000000C3
++:10DD600000000000000000000000000000000000B3
++:10DD700000000000000000000000000000000000A3
++:10DD80000000000000000000000000000000000093
++:10DD90000000000000000000000000000000000083
++:10DDA0000000000000000000000000000000000073
++:10DDB0000000000000000000000000000000000063
++:10DDC0000000000000000000000000000000000053
++:10DDD0000000000000000000000000000000000043
++:10DDE0000000000000000000000000000000000033
++:10DDF0000000000000000000000000000000000023
++:10DE00000000000000000000000000000000000012
++:10DE10000000000000000000000000000000000002
++:10DE200000000000000000000000000000000000F2
++:10DE300000000000000000000000000000000000E2
++:10DE400000000000000000000000000000000000D2
++:10DE500000000000000000000000000000000000C2
++:10DE600000000000000000000000000000000000B2
++:10DE700000000000000000000000000000000000A2
++:10DE80000000000000000000000000000000000092
++:10DE90000000000000000000000000000000000082
++:10DEA0000000000000000000000000000000000072
++:10DEB0000000000000000000000000000000000062
++:10DEC0000000000000000000000000000000000052
++:10DED0000000000000000000000000000000000042
++:10DEE0000000000000000000000000000000000032
++:10DEF0000000000000000000000000000000000022
++:10DF00000000000000000000000000000000000011
++:10DF10000000000000000000000000000000000001
++:10DF200000000000000000000000000000000000F1
++:10DF300000000000000000000000000000000000E1
++:10DF400000000000000000000000000000000000D1
++:10DF500000000000000000000000000000000000C1
++:10DF600000000000000000000000000000000000B1
++:10DF700000000000000000000000000000000000A1
++:10DF80000000000000000000000000000000000091
++:10DF90000000000000000000000000000000000081
++:10DFA0000000000000000000000000000000000071
++:10DFB0000000000000000000000000000000000061
++:10DFC0000000000000000000000000000000000051
++:10DFD0000000000000000000000000000000000041
++:10DFE0000000000000000000000000000000000031
++:10DFF0000000000000000000000000000000000021
++:10E000000000000000000000000000000000000010
++:10E010000000000000000000000000000000000000
++:10E0200000000000000000000000000000000000F0
++:10E0300000000000000000000000000000000000E0
++:10E0400000000000000000000000000000000000D0
++:10E0500000000000000000000000000000000000C0
++:10E0600000000000000000000000000000000000B0
++:10E0700000000000000000000000000000000000A0
++:10E080000000000000000000000000000000000090
++:10E090000000000000000000000000000000000080
++:10E0A0000000000000000000000000000000000070
++:10E0B0000000000000000000000000000000000060
++:10E0C0000000000000000000000000000000000050
++:10E0D0000000000000000000000000000000000040
++:10E0E0000000000000000000000000000000000030
++:10E0F0000000000000000000000000000000000020
++:10E10000000000000000000000000000000000000F
++:10E1100000000000000000000000000000000000FF
++:10E1200000000000000000000000000000000000EF
++:10E1300000000000000000000000000000000000DF
++:10E1400000000000000000000000000000000000CF
++:10E1500000000000000000000000000000000000BF
++:10E1600000000000000000000000000000000000AF
++:10E17000000000000000000000000000000000009F
++:10E18000000000000000000000000000000000008F
++:10E19000000000000000000000000000000000007F
++:10E1A000000000000000000000000000000000006F
++:10E1B000000000000000000000000000000000005F
++:10E1C000000000000000000000000000000000004F
++:10E1D000000000000000000000000000000000003F
++:10E1E000000000000000000000000000000000002F
++:10E1F000000000000000000000000000000000809F
++:10E20000000000000000000000000000000000000E
++:10E2100000000000000000000000000000000000FE
++:10E220000000000A000000000000000000000000E4
++:10E2300010000003000000000000000D0000000DB1
++:10E240003C020801244295C03C030801246397FC6A
++:10E25000AC4000000043202B1480FFFD244200044A
++:10E260003C1D080037BD9FFC03A0F0213C100800B6
++:10E27000261032103C1C0801279C95C00E0012BECF
++:10E28000000000000000000D3C02800030A5FFFFF0
++:10E2900030C600FF344301803C0880008D0901B87E
++:10E2A0000520FFFE00000000AC6400002404000212
++:10E2B000A4650008A066000AA064000BAC67001803
++:10E2C0003C03100003E00008AD0301B83C0560000A
++:10E2D0008CA24FF80440FFFE00000000ACA44FC029
++:10E2E0003C0310003C040200ACA44FC403E000084F
++:10E2F000ACA34FF89486000C00A050212488001491
++:10E3000000062B0200051080004448210109182B4B
++:10E310001060001100000000910300002C6400094F
++:10E320005080000991190001000360803C0D080134
++:10E3300025AD9258018D58218D67000000E000083E
++:10E340000000000091190001011940210109302B42
++:10E3500054C0FFF29103000003E000080000102108
++:10E360000A000CCC25080001910F0001240E000AC0
++:10E3700015EE00400128C8232F38000A1700003D81
++:10E38000250D00028D580000250F0006370E0100F4
++:10E39000AD4E0000910C000291AB000191A400026F
++:10E3A00091A60003000C2E00000B3C0000A71025D6
++:10E3B00000041A000043C8250326C025AD580004F8
++:10E3C000910E000691ED000191E7000291E5000336
++:10E3D000000E5E00000D6400016C30250007220075
++:10E3E00000C41025004518252508000A0A000CCC99
++:10E3F000AD430008910F000125040002240800022B
++:10E4000055E80001012020210A000CCC00804021A9
++:10E41000910C0001240B0003158B00160000000076
++:10E420008D580000910E000225080003370D0008EA
++:10E43000A14E00100A000CCCAD4D00009119000156
++:10E44000240F0004172F000B0000000091070002AA
++:10E45000910400038D43000000072A0000A410254A
++:10E460003466000425080004AD42000C0A000CCC00
++:10E47000AD46000003E000082402000127BDFFE8CC
++:10E48000AFBF0014AFB000100E00164E0080802108
++:10E490003C0480083485008090A600052403FFFE1C
++:10E4A0000200202100C310248FBF00148FB0001081
++:10E4B000A0A200050A00165827BD001827BDFFE8D6
++:10E4C000AFB00010AFBF00140E000FD40080802149
++:10E4D0003C06800834C5008090A40000240200504F
++:10E4E000308300FF106200073C09800002002021F9
++:10E4F0008FBF00148FB00010AD2001800A00108F74
++:10E5000027BD0018240801003C07800002002021DC
++:10E510008FBF00148FB00010ACE801800A00108F8C
++:10E5200027BD001827BDFF783C058008AFBE0080DE
++:10E53000AFB7007CAFB3006CAFB10064AFBF008475
++:10E54000AFB60078AFB50074AFB40070AFB200687A
++:10E55000AFB0006034A600803C0580008CB201287A
++:10E5600090C400098CA701043C020001309100FF17
++:10E5700000E218240000B8210000F021106000071C
++:10E58000000098213C0908008D2931F02413000176
++:10E59000252800013C010800AC2831F0ACA0008423
++:10E5A00090CC0005000C5827316A0001154000721C
++:10E5B000AFA0005090CD00002406002031A400FF41
++:10E5C00010860018240E0050108E009300000000EA
++:10E5D0003C1008008E1000DC260F00013C010800F2
++:10E5E000AC2F00DC0E0016C7000000000040182110
++:10E5F0008FBF00848FBE00808FB7007C8FB60078FD
++:10E600008FB500748FB400708FB3006C8FB2006848
++:10E610008FB100648FB000600060102103E000083B
++:10E6200027BD00880000000D3C1F8000AFA0003017
++:10E6300097E501168FE201043C04002030B9FFFF8A
++:10E64000004438240007182B00033140AFA60030E7
++:10E650008FF5010437F80C003C1600400338802188
++:10E6600002B6A02434C40040128000479215000D69
++:10E6700032A800201500000234860080008030217E
++:10E6800014C0009FAFA600303C0D800835A6008066
++:10E6900090CC0008318B0040516000063C06800899
++:10E6A000240E0004122E00A8240F0012122F003294
++:10E6B0003C06800834C401003C0280009447011AE3
++:10E6C0009619000E909F00088E18000830E3FFFF97
++:10E6D00003F9B00432B40004AFB6005CAFA3005835
++:10E6E0008E1600041280002EAFB8005434C3008090
++:10E6F000906800083105004014A0002500000000CB
++:10E700008C70005002D090230640000500000000ED
++:10E710008C71003402D1A82306A201678EE20008A2
++:10E72000126000063C1280003C1508008EB531F4E2
++:10E7300026B600013C010800AC3631F4AE4000447E
++:10E74000240300018FBF00848FBE00808FB7007C40
++:10E750008FB600788FB500748FB400708FB3006CE3
++:10E760008FB200688FB100648FB00060006010212C
++:10E7700003E0000827BD00880E000D2800002021BE
++:10E780000A000D75004018210A000D9500C02021D7
++:10E790000E00171702C020211440FFE10000000006
++:10E7A0003C0B8008356400808C8A003402CA482300
++:10E7B0000520001D000000003C1E08008FDE310017
++:10E7C00027D700013C010800AC3731001260000679
++:10E7D000024020213C1408008E9431F42690000160
++:10E7E0003C010800AC3031F40E00164E3C1E80088F
++:10E7F00037CD008091B700250240202136EE00047D
++:10E800000E001658A1AE00250E000CAC02402021CF
++:10E810000A000DCA240300013C17080126F796C020
++:10E820000A000D843C1F80008C86003002C66023E5
++:10E830001980000C2419000C908F004F3C14080024
++:10E840008E94310032B500FC35ED0001268E0001BA
++:10E850003C010800AC2E3100A08D004FAFA0005845
++:10E860002419000CAFB900308C9800300316A02397
++:10E870001A80010B8FA300580074F82A17E0FFD309
++:10E88000000000001074002A8FA5005802D4B021A7
++:10E8900000B410233044FFFFAFA4005832A8000298
++:10E8A0001100002E32AB00103C15800836B00080FD
++:10E8B0009216000832D30040526000FB8EE200083E
++:10E8C0000E00164E02402021240A0018A20A000958
++:10E8D000921100052409FFFE024020210229902404
++:10E8E0000E001658A2120005240400390000282149
++:10E8F0000E0016F2240600180A000DCA24030001B7
++:10E9000092FE000C3C0A800835490080001EBB00C6
++:10E910008D27003836F10081024020213225F08118
++:10E920000E000C9B30C600FF0A000DC10000000065
++:10E930003AA7000130E300011460FFA402D4B02123
++:10E940000A000E1D00000000024020210E001734B6
++:10E95000020028210A000D75004018211160FF7087
++:10E960003C0F80083C0D800835EE00808DC40038D7
++:10E970008FA300548DA60004006660231D80FF68ED
++:10E98000000000000064C02307020001AFA400548F
++:10E990003C1F08008FFF31E433F9000113200015FC
++:10E9A0008FAC00583C07800094E3011A10600012FD
++:10E9B0003C0680080E00216A024020213C03080129
++:10E9C000906396F13064000214800145000000005D
++:10E9D000306C0004118000078FAC0058306600FBDB
++:10E9E0003C010801A02696F132B500FCAFA000580A
++:10E9F0008FAC00583C06800834D30080AFB40018B8
++:10EA0000AFB60010AFAC00143C088000950B01209D
++:10EA10008E6F0030966A005C8FA3005C8FBF003061
++:10EA20003169FFFF3144FFFF8FAE005401341021E4
++:10EA3000350540000064382B0045C82103E7C02598
++:10EA4000AFB90020AFAF0028AFB80030AFAF00249F
++:10EA5000AFA0002CAFAE0034926D000831B40008B6
++:10EA6000168000BB020020218EE200040040F8095D
++:10EA700027A400108FAF003031F300025660000170
++:10EA800032B500FE3C048008349F008093F90008F2
++:10EA900033380040530000138FA400248C850004F9
++:10EAA0008FA7005410A700D52404001432B0000131
++:10EAB0001200000C8FA400242414000C1234011A3C
++:10EAC0002A2D000D11A001022413000E240E000AAD
++:10EAD000522E0001241E00088FAF002425E40001FF
++:10EAE000AFA400248FAA00143C0B80083565008079
++:10EAF000008A48218CB10030ACA9003090A4004EAF
++:10EB00008CA700303408FFFF0088180400E3F821C8
++:10EB1000ACBF00348FA600308FB900548FB8005CB2
++:10EB200030C200081040000B033898218CAC002044
++:10EB3000119300D330C600FF92EE000C8FA7003473
++:10EB400002402021000E6B0035B400800E000C9BAB
++:10EB50003285F0803C028008345000808E0F0030F7
++:10EB600001F1302318C00097264800803C070800B8
++:10EB70008CE731E42404FF80010418243118007F5D
++:10EB80003C1F80003C19800430F10001AFE300908D
++:10EB900012200006031928213C030801906396F116
++:10EBA00030690008152000C6306A00F73C10800864
++:10EBB00036040080908C004F318B000115600042BC
++:10EBC000000000003C0608008CC6319830CE0010D2
++:10EBD00051C0004230F9000190AF006B55E0003F9A
++:10EBE00030F9000124180001A0B8006B3C1180002E
++:10EBF0009622007A24470064A48700123C0D800806
++:10EC000035A5008090B40008329000401600000442
++:10EC10003C03800832AE000115C0008B00000000EC
++:10EC2000346400808C86002010D3000A3463010015
++:10EC30008C67000002C7782319E000978FBF00544B
++:10EC4000AC93002024130001AC760000AFB3005059
++:10EC5000AC7F000417C0004E000000008FA90050D8
++:10EC60001520000B000000003C030801906396F1A2
++:10EC7000306A00011140002E8FAB0058306400FE56
++:10EC80003C010801A02496F10A000D75000018212E
++:10EC90000E000CAC024020210A000F1300000000FF
++:10ECA0000A000E200000A0210040F80924040017EB
++:10ECB0000A000DCA240300010040F80924040016CC
++:10ECC0000A000DCA240300019094004F240DFFFE9A
++:10ECD000028D2824A085004F30F900011320000682
++:10ECE0003C0480083C030801906396F1307F0010DB
++:10ECF00017E00051306800EF34900080240A0001D2
++:10ED0000024020210E00164EA60A00129203002592
++:10ED100024090001AFA90050346200010240202103
++:10ED20000E001658A20200250A000EF93C0D8008BC
++:10ED30001160FE83000018218FA5003030AC000464
++:10ED40001180FE2C8FBF00840A000DCB240300012C
++:10ED500027A500380E000CB6AFA000385440FF4382
++:10ED60008EE200048FB40038329001005200FF3F61
++:10ED70008EE200048FA3003C8E6E0058006E682364
++:10ED800005A3FF39AE6300580A000E948EE200041A
++:10ED90000E00164E024020213C038008346800809B
++:10EDA000024020210E001658A11E000903C0302188
++:10EDB000240400370E0016F2000028210A000F116B
++:10EDC0008FA900508FAB00185960FF8D3C0D800853
++:10EDD0000E00164E02402021920C00252405000151
++:10EDE000AFA5005035820004024020210E001658C5
++:10EDF000A20200250A000EF93C0D800812240059D9
++:10EE00002A2300151060004D240900162408000C68
++:10EE10005628FF2732B000013C0A8008914C001BA5
++:10EE20002406FFBD241E000E01865824A14B001BA2
++:10EE30000A000EA532B000013C010801A02896F19D
++:10EE40000A000EF93C0D80088CB500308EFE0008DB
++:10EE50002404001826B6000103C0F809ACB600303F
++:10EE60003C030801906396F13077000116E0FF81C2
++:10EE7000306A00018FB200300A000D753243000481
++:10EE80003C1080009605011A50A0FF2B34C60010DC
++:10EE90000A000EC892EE000C8C6200001456FF6D42
++:10EEA000000000008C7800048FB9005403388823D8
++:10EEB0000621FF638FBF00540A000F0E0000000000
++:10EEC0003C010801A02A96F10A000F3030F9000138
++:10EED0001633FF028FAF00240A000EB0241E00106C
++:10EEE0000E00164E024020213C0B80083568008041
++:10EEF00091090025240A0001AFAA0050353300040F
++:10EF0000024020210E001658A11300253C050801DF
++:10EF100090A596F130A200FD3C010801A02296F1D7
++:10EF20000A000E6D004018212411000E53D1FEEA94
++:10EF3000241E00100A000EAF241E00165629FEDC07
++:10EF400032B000013C0A8008914C001B2406FFBD32
++:10EF5000241E001001865824A14B001B0A000EA598
++:10EF600032B000010A000EA4241E00123C038000EF
++:10EF70008C6201B80440FFFE24040800AC6401B8B0
++:10EF800003E000080000000030A5FFFF30C6FFFFCF
++:10EF90003C0780008CE201B80440FFFE34EA0180A7
++:10EFA000AD440000ACE400203C0480089483004899
++:10EFB0003068FFFF11000016AF88000824AB001274
++:10EFC000010B482B512000133C04800034EF01005A
++:10EFD00095EE00208F890000240D001A31CCFFFF30
++:10EFE00031274000A14D000B10E000362583FFFEC5
++:10EFF0000103C02B170000348F9900048F88000490
++:10F00000A5430014350700010A001003AF87000470
++:10F010003C04800024030003348201808F890000B7
++:10F020008F870004A043000B3C088000350C018052
++:10F03000A585000EA585001A8F85000C30EB800099
++:10F04000A5890010AD850028A58600081160000F75
++:10F050008F85001435190100972A00163158FFFCDE
++:10F06000270F000401E870218DCD400031A6FFFF7D
++:10F0700014C000072403BFFF3C02FFFF34487FFF9A
++:10F0800000E83824AF8700048F8500142403BFFFF5
++:10F090003C04800000E3582434830180A46B0026E4
++:10F0A000AC69002C10A0000300054C02A465001000
++:10F0B000A46900263C071000AC8701B803E00008F3
++:10F0C000000000008F990004240AFFFE032A382460
++:10F0D0000A001003AF87000427BDFFE88FA20028B5
++:10F0E00030A5FFFF30C6FFFFAFBF0010AF87000C99
++:10F0F000AF820014AF8000040E000FDBAF80000071
++:10F100008FBF001027BD001803E00008AF80001477
++:10F110003C06800034C4007034C701008C8A0000B3
++:10F1200090E500128F84000027BDFFF030A300FFA0
++:10F13000000318823082400010400037246500032D
++:10F140000005C8800326C0218F0E4000246F0004F4
++:10F15000000F6880AFAE000001A660218D8B4000DB
++:10F16000AFAB000494E900163128FFFC01063821FA
++:10F170008CE64000AFA600088FA9000800003021EF
++:10F18000000028213C07080024E701000A0010675E
++:10F19000240800089059000024A500012CAC000CA4
++:10F1A0000079C0210018788001E770218DCD000022
++:10F1B0001180000600CD302603A5102114A8FFF50C
++:10F1C00000051A005520FFF4905900003C0480000F
++:10F1D000348700703C0508008CA531048CE30000E6
++:10F1E0002CA2002010400009006A38230005488046
++:10F1F0003C0B0800256B3108012B402124AA00019B
++:10F20000AD0700003C010800AC2A310400C0102109
++:10F2100003E0000827BD0010308220001040000BE2
++:10F2200000055880016648218D24400024680004B0
++:10F2300000083880AFA4000000E618218C6540006B
++:10F24000AFA000080A001057AFA500040000000D91
++:10F250000A0010588FA9000827BDFFE03C07800076
++:10F2600034E60100AFBF001CAFB20018AFB100140C
++:10F27000AFB0001094C5000E8F87000030A4FFFFD0
++:10F280002483000430E2400010400010AF830028C7
++:10F290003C09002000E940241100000D30EC800002
++:10F2A0008F8A0004240BBFFF00EB38243543100085
++:10F2B000AF87000030F220001640000B3C1900041C
++:10F2C000241FFFBF0A0010B7007F102430EC80001D
++:10F2D000158000423C0E002030F220001240FFF862
++:10F2E0008F8300043C19000400F9C0241300FFF5CB
++:10F2F000241FFFBF34620040AF82000430E20100EF
++:10F300001040001130F010008F83002C10600006B8
++:10F310003C0F80003C05002000E52024148000C044
++:10F320003C0800043C0F800035EE010095CD001E26
++:10F3300095CC001C31AAFFFF000C5C00014B482556
++:10F34000AF89000C30F010001200000824110001F9
++:10F3500030F100201620008B3C18100000F890249B
++:10F36000164000823C040C002411000130E801002A
++:10F370001500000B3C0900018F85000430A94000F6
++:10F38000152000073C0900013C0C1F0100EC58242B
++:10F390003C0A1000116A01183C1080003C09000171
++:10F3A00000E9302410C000173C0B10003C18080086
++:10F3B0008F1800243307000214E0014024030001E9
++:10F3C0008FBF001C8FB200188FB100148FB00010D7
++:10F3D0000060102103E0000827BD002000EE682433
++:10F3E00011A0FFBE30F220008F8F00043C11FFFF00
++:10F3F00036307FFF00F0382435E380000A0010A685
++:10F40000AF87000000EB102450400065AF8000245F
++:10F410008F8C002C3C0D0F0000ED18241580008807
++:10F42000AF83001030E8010011000086938F0010B8
++:10F430003C0A0200106A00833C1280003650010032
++:10F44000920500139789002A3626000230AF00FF8C
++:10F4500025EE0004000E19C03C0480008C9801B811
++:10F460000700FFFE34880180AD0300003C198008CE
++:10F47000AC830020973100483225FFFF10A0015CCB
++:10F48000AF8500082523001200A3F82B53E0015993
++:10F490008F850004348D010095AC00202402001AF1
++:10F4A00030E44000318BFFFFA102000B108001927D
++:10F4B0002563FFFE00A3502B154001908F8F0004A1
++:10F4C000A50300148F88000435050001AF850004F2
++:10F4D0003C08800035190180A729000EA729001AD1
++:10F4E0008F89000C30B18000A7270010AF290028B9
++:10F4F000A72600081220000E3C04800035020100FF
++:10F50000944C0016318BFFFC256400040088182100
++:10F510008C7F400033E6FFFF14C000053C048000F0
++:10F520003C0AFFFF354D7FFF00AD2824AF85000466
++:10F53000240EBFFF00AE402434850180A4A800261D
++:10F54000ACA7002C3C071000AC8701B800001821C4
++:10F550008FBF001C8FB200188FB100148FB0001045
++:10F560000060102103E0000827BD00203C020BFFD3
++:10F5700000E41824345FFFFF03E3C82B5320FF7B14
++:10F58000241100013C0608008CC6002C24C5000193
++:10F590003C010800AC25002C0A0010D42411000501
++:10F5A0008F85002410A0002FAF80001090A30000D2
++:10F5B000146000792419000310A0002A30E601002D
++:10F5C00010C000CC8F860010241F000210DF00C97D
++:10F5D0008F8B000C3C0708008CE7003824E4FFFF09
++:10F5E00014E0000201641824000018213C0D0800FA
++:10F5F00025AD0038006D1021904C00048F85002847
++:10F6000025830004000321C030A5FFFF3626000239
++:10F610000E000FDB000000000A00114D0000182151
++:10F6200000E8302414C0FF403C0F80000E00103D65
++:10F63000000000008F8700000A0010CAAF82000C93
++:10F64000938F00103C18080127189640000F90C0B7
++:10F6500002588021AF9000248F85002414A0FFD38E
++:10F66000AF8F00103C0480008C86400030C5010044
++:10F6700010A000BC322300043C0C08008D8C002438
++:10F6800024120004106000C23190000D3C04800080
++:10F690008C8D40003402FFFF11A201003231FFFBCC
++:10F6A0008C884000310A01005540000124110010EF
++:10F6B00030EE080011C000BE2419FFFB8F9800280F
++:10F6C0002F0F03EF51E000010219802430E90100FF
++:10F6D00011200014320800018F87002C14E000FB79
++:10F6E0008F8C000C3C05800034AB0100917F00132F
++:10F6F00033E300FF246A00042403FFFE0203802496
++:10F70000000A21C012000002023230253226FFFF1B
++:10F710000E000FDB9785002A1200FF290000182138
++:10F72000320800011100000D32180004240E0001FF
++:10F73000120E0002023230253226FFFF9785002A82
++:10F740000E000FDB00002021240FFFFE020F80249B
++:10F750001200FF1B00001821321800045300FF188C
++:10F760002403000102323025241200045612000145
++:10F770003226FFFF9785002A0E000FDB24040100CC
++:10F780002419FFFB021988241220FF0D0000182104
++:10F790000A0010E9240300011079009C00003021C8
++:10F7A00090AD00012402000211A200BE30EA004028
++:10F7B00090B90001241800011338007F30E900409F
++:10F7C0008CA600049785002A00C020210E000FDBC4
++:10F7D0003626000200004021010018218FBF001CC6
++:10F7E0008FB200188FB100148FB00010006010218C
++:10F7F00003E0000827BD0020360F010095EE000C45
++:10F8000031CD020015A0FEE63C0900013C1880083D
++:10F81000971200489789002A362600023248FFFFD7
++:10F82000AF8800083C0380008C7101B80620FFFE01
++:10F83000346A0180AD4000001100008E3C0F800052
++:10F84000253F0012011FC82B1320008B240E00033C
++:10F85000346C0100958B00202402001A30E4400033
++:10F860003163FFFFA142000B108000A72463FFFE5D
++:10F870000103682B15A000A52408FFFE34A5000194
++:10F88000A5430014AF8500043C0480002412BFFF90
++:10F8900000B2802434850180A4A9000EA4A9001A16
++:10F8A000A4A60008A4B00026A4A700103C071000DE
++:10F8B000AC8701B80A00114D000018213C038000FC
++:10F8C00034640100949F000E3C1908008F3900D861
++:10F8D0002404008033E5FFFF273100013C010800CC
++:10F8E000AC3100D80E000FDB240600030A00114DD6
++:10F8F00000001821240A000210CA00598F85002830
++:10F900003C0308008C6300D0240E0001106E005EE2
++:10F910002CCF000C24D2FFFC2E5000041600002136
++:10F9200000002021241800021078001B2CD9000CA4
++:10F9300024DFFFF82FE900041520FF330000202109
++:10F9400030EB020051600004000621C054C00022C8
++:10F9500030A5FFFF000621C030A5FFFF0A00117D82
++:10F96000362600023C0908008D29002431300001B0
++:10F970005200FEF7000018219785002A3626000263
++:10F980000E000FDB000020210A00114D000018219D
++:10F990000A00119C241200021320FFE624DFFFF866
++:10F9A0000000202130A5FFFF0A00117D362600024D
++:10F9B0000A0011AC021980245120FF828CA6000499
++:10F9C0003C05080190A5964110A0FF7E2408000187
++:10F9D0000A0011F0010018210E000FDB3226000191
++:10F9E0008F8600108F8500280A00124F000621C064
++:10F9F0008F8500043C18800024120003371001801A
++:10FA0000A212000B0A00112E3C08800090A30001F6
++:10FA1000241100011071FF70240800012409000264
++:10FA20005069000430E60040240800010A0011F08B
++:10FA30000100182150C0FFFD240800013C0C80008B
++:10FA4000358B01009563001094A40002307FFFFF06
++:10FA5000509FFF62010018210A001284240800014F
++:10FA60002CA803EF1100FE56240300010A001239EE
++:10FA700000000000240E000335EA0180A14E000BB7
++:10FA80000A00121C3C04800011E0FFA2000621C005
++:10FA900030A5FFFF0A00117D362600020A0011A5DD
++:10FAA000241100201140FFC63C1280003650010096
++:10FAB000960F001094AE000231E80FFF15C8FFC08A
++:10FAC000000000000A0011E690B900013C060800A1
++:10FAD0008CC6003824C4FFFF14C00002018418241F
++:10FAE000000018213C0D080025AD0038006D1021E4
++:10FAF0000A0011B6904300048F8F0004240EFFFE0D
++:10FB00000A00112C01EE28242408FFFE0A00121A14
++:10FB100000A8282427BDFFC8AFB00010AFBF003435
++:10FB20003C10600CAFBE0030AFB7002CAFB6002861
++:10FB3000AFB50024AFB40020AFB3001CAFB20018C3
++:10FB4000AFB100148E0E5000240FFF7F3C068000E2
++:10FB500001CF682435AC380C240B0003AE0C5000E8
++:10FB6000ACCB00083C010800AC2000200E001819A6
++:10FB7000000000003C0A0010354980513C06601628
++:10FB8000AE09537C8CC700003C0860148D0500A0B2
++:10FB90003C03FFFF00E320243C02535300051FC237
++:10FBA0001482000634C57C000003A08002869821E0
++:10FBB0008E7200043C116000025128218CBF007C31
++:10FBC0008CA200783C1E600037C420203C05080150
++:10FBD00024A59288AF820018AF9F001C0E0016DD8E
++:10FBE0002406000A3C190001273996403C01080010
++:10FBF000AC3931DC0E0020DDAF8000148FD708084F
++:10FC00002418FFF03C15570902F8B02412D502F56C
++:10FC100024040001AF80002C3C1480003697018042
++:10FC20003C1E080127DE9644369301008E900000AA
++:10FC30003205000310A0FFFD3207000110E000882C
++:10FC4000320600028E7100283C048000AE91002034
++:10FC50008E6500048E66000000A0382100C040219F
++:10FC60008C8301B80460FFFE3C0B0010240A0800DE
++:10FC700000AB4824AC8A01B8552000E0240BBFFF3C
++:10FC80009675000E3C1208008E52002030AC4000E9
++:10FC900032AFFFFF264E000125ED00043C010800B5
++:10FCA000AC2E0020118000E8AF8D00283C18002009
++:10FCB00000B8B02412C000E530B980002408BFFFAE
++:10FCC00000A8382434C81000AF87000030E62000B8
++:10FCD00010C000E92409FFBF3C03000400E328240E
++:10FCE00010A00002010910243502004030EA010092
++:10FCF00011400010AF8200048F8B002C11600007B0
++:10FD00003C0D002000ED6024118000043C0F000435
++:10FD100000EF702411C00239000000009668001E38
++:10FD20009678001C3115FFFF0018B40002B690252C
++:10FD3000AF92000C30F910001320001324150001BD
++:10FD400030FF002017E0000A3C04100000E41024FB
++:10FD50001040000D3C0A0C003C090BFF00EA18247F
++:10FD60003525FFFF00A3302B10C0000830ED010047
++:10FD70003C0C08008D8C002C24150005258B0001FF
++:10FD80003C010800AC2B002C30ED010015A0000B4D
++:10FD90003C0500018F85000430AE400055C00007CF
++:10FDA0003C0500013C161F0100F690243C0F10009A
++:10FDB000124F01CE000000003C05000100E5302498
++:10FDC00010C000AF3C0C10003C1F08008FFF002447
++:10FDD00033E90002152000712403000100601021A6
++:10FDE000104000083C0680003C08800035180100E7
++:10FDF0008F0F00243C056020ACAF00140000000011
++:10FE00003C0680003C194000ACD9013800000000DD
++:10FE10005220001332060002262B0140262C0080BF
++:10FE2000240EFF80016E2024018E6824000D1940ED
++:10FE3000318A007F0004A9403172007F3C16200007
++:10FE400036C20002006A482502B2382500E2882541
++:10FE50000122F825ACDF0830ACD1083032060002B0
++:10FE600010C0FF723C188000370501408CA80000CC
++:10FE700024100040AF08002090AF000831E300706C
++:10FE8000107000D428790041532000082405006038
++:10FE9000241100201071000E3C0A40003C09800033
++:10FEA000AD2A01780A001304000000001465FFFB6E
++:10FEB0003C0A40000E001FFA000000003C0A40000F
++:10FEC0003C098000AD2A01780A00130400000000FC
++:10FED00090A90009241F00048CA70000312800FF0E
++:10FEE000111F01B22503FFFA2C7200061240001404
++:10FEF0003C0680008CA9000494A4000A310500FF90
++:10FF000000095E022D6A00083086FFFF15400002DE
++:10FF10002567000424070003240C000910AC01FA33
++:10FF200028AD000A11A001DE2410000A240E0008EA
++:10FF300010AE0028000731C000C038213C06800008
++:10FF40008CD501B806A0FFFE34D20180AE47000078
++:10FF500034CB0140916E0008240300023C0A4000AB
++:10FF600031C400FF00046A0001A86025A64C000807
++:10FF7000A243000B9562000A3C0810003C09800077
++:10FF8000A64200108D670004AE470024ACC801B83B
++:10FF9000AD2A01780A001304000000003C0A80002A
++:10FFA000354401009483000E3C0208008C4200D8C6
++:10FFB000240400803065FFFF245500013C01080047
++:10FFC000AC3500D80E000FDB240600030A001370C6
++:10FFD000000018210009320230D900FF2418000166
++:10FFE0001738FFD5000731C08F910020262200016D
++:10FFF000AF8200200A0013C800C0382100CB2024A3
++:020000021000EC
++:10000000AF85000010800008AF860004240D87FF34
++:1000100000CD6024158000083C0E006000AE302446
++:1000200010C00005000000000E000D42000000009E
++:100030000A001371000000000E0016050000000009
++:100040000A0013710000000030B980005320FF1F28
++:10005000AF8500003C02002000A2F82453E0FF1B03
++:10006000AF8500003C07FFFF34E47FFF00A4382485
++:100070000A00132B34C880000A001334010910242D
++:1000800000EC58245160005AAF8000248F8D002C62
++:100090003C0E0F0000EE182415A00075AF83001071
++:1000A00030EF010011E00073939800103C12020041
++:1000B000107200703C06800034D9010093280013B0
++:1000C0009789002A36A60002311800FF271600047F
++:1000D000001619C03C0480008C8501B804A0FFFE06
++:1000E00034880180AD0300003C158008AC830020FB
++:1000F00096BF004833E5FFFF10A001BCAF850008A4
++:100100002523001200A3102B504001B98F85000455
++:10011000348D010095AC0020240B001A30E440001F
++:10012000318AFFFFA10B000B108001BA2543FFFEAF
++:1001300000A3702B15C001B88F9600048F8F0004A8
++:10014000A503001435E50001AF8500043C088000DC
++:1001500035150180A6A9000EA6A9001A8F89000CEA
++:1001600030BF8000A6A70010AEA90028A6A60008F0
++:1001700013E0000F3C0F8000350C0100958B00163A
++:10018000316AFFFC25440004008818218C6240007D
++:100190003046FFFF14C000072416BFFF3C0EFFFFD0
++:1001A00035CD7FFF00AD2824AF8500043C0F8000D3
++:1001B0002416BFFF00B6902435E50180A4B20026C6
++:1001C000ACA7002C3C071000ADE701B80A00137083
++:1001D000000018210E00165D000000003C0A4000DF
++:1001E0003C098000AD2A01780A00130400000000D9
++:1001F0008F85002410A00027AF80001090A300007E
++:10020000106000742409000310690101000030210E
++:1002100090AE0001240D000211CD014230EF0040EC
++:1002200090A90001241F0001113F000930E20040A5
++:100230008CA600049785002A00C020210E000FDB49
++:1002400036A60002000040210A00137001001821A8
++:100250005040FFF88CA600043C07080190E7964147
++:1002600010E0FFF4240800010A00137001001821B7
++:10027000939800103C1F080127FF96400018C8C043
++:10028000033F4021AF8800248F85002414A0FFDBAA
++:10029000AF9800103C0480008C86400030C50100FF
++:1002A00010A0008732AB00043C0C08008D8C0024A9
++:1002B00024160004156000033192000D241600027C
++:1002C0003C0480008C8E4000340DFFFF11CD0113E3
++:1002D00032B5FFFB8C984000330F010055E0000160
++:1002E0002415001030E80800110000382409FFFB35
++:1002F0008F9F00282FF903EF53200001024990241B
++:1003000030E2010010400014325F00018F87002CA2
++:1003100014E0010E8F8C000C3C0480003486010038
++:1003200090C5001330AA00FF25430004000321C03C
++:100330002419FFFE025990241240000202B6302513
++:1003400032A6FFFF0E000FDB9785002A1240FEA3A6
++:1003500000001821325F000113E0000D3247000455
++:10036000240900011249000202B6302532A6FFFF1F
++:100370009785002A0E000FDB000020212402FFFEDB
++:10038000024290241240FE950000182132470004DA
++:1003900050E0FE922403000102B63025241600042A
++:1003A0005656000132A6FFFF9785002A0E000FDB8C
++:1003B000240401002403FFFB0243A82412A0FE87AB
++:1003C000000018210A001370240300010A0014B968
++:1003D0000249902410A0FFAF30E5010010A00017E3
++:1003E0008F8600102403000210C300148F84000CB9
++:1003F0003C0608008CC6003824CAFFFF14C0000267
++:10040000008A1024000010213C0E080025CE003880
++:10041000004E682191AC00048F850028258B0004D4
++:10042000000B21C030A5FFFF36A600020E000FDB37
++:10043000000000000A00137000001821240F0002C1
++:1004400010CF0088241600013C0308008C6300D004
++:100450001076008D8F85002824D9FFFC2F280004FA
++:100460001500006300002021241F0002107F005DA2
++:100470002CC9000C24C3FFF82C6200041440FFE9CF
++:100480000000202130EA020051400004000621C093
++:1004900054C0000530A5FFFF000621C030A5FFFFB6
++:1004A0000A00150436A600020E000FDB32A600017A
++:1004B0008F8600108F8500280A001520000621C0B5
++:1004C0003C0A08008D4A0024315200015240FE438C
++:1004D000000018219785002A36A600020E000FDBC7
++:1004E000000020210A001370000018219668000CFB
++:1004F000311802005700FE313C0500013C1F800806
++:1005000097F900489789002A36A600023328FFFF92
++:10051000AF8800083C0380008C7501B806A0FFFE80
++:100520003C04800034820180AC400000110000B621
++:1005300024180003252A0012010A182B106000B2AB
++:1005400000000000966F00203C0E8000240D001A71
++:1005500031ECFFFF35CA018030EB4000A14D000BAC
++:10056000116000B02583FFFE0103902B164000AE02
++:100570002416FFFE34A50001A5430014AF85000436
++:100580002419BFFF00B94024A6E9000EA6E9001A0D
++:10059000A6E60008A6E80026A6E700103C07100023
++:1005A000AE8701B80A001370000018213C048000D7
++:1005B0008C8201B80440FFFE349601802415001C93
++:1005C000AEC70000A2D5000B3C071000AC8701B8F5
++:1005D0003C0A40003C098000AD2A01780A0013045F
++:1005E000000000005120FFA424C3FFF800002021D8
++:1005F00030A5FFFF0A00150436A600020E00103DCC
++:10060000000000008F8700000A001346AF82000C34
++:1006100090A30001241500011075FF0B24080001B0
++:10062000240600021066000430E2004024080001A5
++:100630000A001370010018215040FFFD240800013A
++:100640003C0C8000358B0100956A001094A40002D8
++:100650003143FFFF5083FDE1010018210A00158599
++:10066000240800018F8500282CB203EF1240FDDB27
++:10067000240300013C0308008C6300D02416000111
++:100680001476FF7624D9FFFC2CD8000C1300FF72DF
++:10069000000621C030A5FFFF0A00150436A600029F
++:1006A00010B00037240F000B14AFFE23000731C039
++:1006B000312600FF00065600000A4E0305220047BF
++:1006C00030C6007F0006F8C03C16080126D69640CA
++:1006D00003F68021A2000001A20000003C0F600090
++:1006E0008DF918202405000100C588040011302769
++:1006F0000326C024000731C000C03821ADF81820FF
++:100700000A0013C8A60000028F850020000731C030
++:1007100024A2FFFF0A0013F6AF8200200A0014B2E1
++:100720002415002011E0FECC3C1980003728010080
++:100730009518001094B6000233120FFF16D2FEC6B1
++:10074000000000000A00148290A900013C0B080080
++:100750008D6B0038256DFFFF15600002018D1024A0
++:10076000000010213C080800250800380048C0217E
++:10077000930F000425EE00040A0014C5000E21C0EA
++:1007800000065202241F00FF115FFDEB000731C07D
++:10079000000A20C03C0E080125CE9640008EA821FC
++:1007A000009E602100095C02240D00013C076000EE
++:1007B000A2AD0000AD860000A2AB00018CF21820B3
++:1007C00024030001014310040242B025ACF61820B6
++:1007D00000C038210A0013C8A6A900020A0015AA01
++:1007E000AF8000200A0012FFAF84002C8F85000428
++:1007F0003C1980002408000337380180A308000B4F
++:100800000A00144D3C088000A2F8000B0A00155A9B
++:100810002419BFFF8F9600042412FFFE0A00144B18
++:1008200002D228242416FFFE0A00155800B62824F8
++:100830003C038000346401008C85000030A2003E3F
++:100840001440000800000000AC6000488C870000E5
++:1008500030E607C010C0000500000000AC60004C8E
++:10086000AC60005003E0000824020001AC600054BA
++:10087000AC6000408C880000310438001080FFF923
++:10088000000000002402000103E00008AC60004406
++:100890003C0380008C6201B80440FFFE3467018095
++:1008A000ACE4000024080001ACE00004A4E500086A
++:1008B00024050002A0E8000A34640140A0E5000B12
++:1008C0009483000A14C00008A4E30010ACE00024E4
++:1008D0003C07800034E901803C041000AD20002872
++:1008E00003E00008ACE401B88C8600043C0410006E
++:1008F000ACE600243C07800034E90180AD200028EC
++:1009000003E00008ACE401B83C0680008CC201B8EA
++:100910000440FFFE34C7018024090002ACE400005B
++:10092000ACE40004A4E50008A0E9000A34C50140D5
++:10093000A0E9000B94A8000A3C041000A4E80010F1
++:10094000ACE000248CA30004ACE3002803E0000822
++:10095000ACC401B83C039000346200010082202541
++:100960003C038000AC6400208C65002004A0FFFEE6
++:100970000000000003E00008000000003C028000CE
++:10098000344300010083202503E00008AC4400202C
++:1009900027BDFFE03C098000AFBF0018AFB10014D5
++:1009A000AFB00010352801408D10000091040009FF
++:1009B0009107000891050008308400FF30E600FF31
++:1009C00000061A002C820081008330251040002A86
++:1009D00030A50080000460803C0D080125AD92B078
++:1009E000018D58218D6A00000140000800000000C0
++:1009F0003C038000346201409445000A14A0001EAC
++:100A00008F91FCC09227000530E6000414C0001A44
++:100A1000000000000E00164E02002021922A000560
++:100A200002002021354900040E001658A2290005B5
++:100A30009228000531040004148000020000000028
++:100A40000000000D922D0000240B002031AC00FFAF
++:100A5000158B00093C0580008CAE01B805C0FFFE77
++:100A600034B10180AE3000003C0F100024100005AE
++:100A7000A230000BACAF01B80000000D8FBF001812
++:100A80008FB100148FB0001003E0000827BD0020D4
++:100A90000200202100C028218FBF00188FB1001450
++:100AA0008FB00010240600010A00161D27BD00208B
++:100AB0000000000D0200202100C028218FBF001877
++:100AC0008FB100148FB00010000030210A00161DF5
++:100AD00027BD002014A0FFE8000000000200202134
++:100AE0008FBF00188FB100148FB0001000C02821F4
++:100AF0000A00163B27BD00203C0780008CEE01B8A1
++:100B000005C0FFFE34F00180241F0002A21F000B6D
++:100B100034F80140A60600089719000A3C0F10009F
++:100B2000A61900108F110004A6110012ACEF01B835
++:100B30000A0016998FBF001827BDFFE8AFBF00104D
++:100B40000E000FD4000000003C0280008FBF001098
++:100B500000002021AC4001800A00108F27BD001842
++:100B60003084FFFF30A5FFFF108000070000182130
++:100B7000308200011040000200042042006518216C
++:100B80001480FFFB0005284003E0000800601021EE
++:100B900010C00007000000008CA2000024C6FFFF68
++:100BA00024A50004AC82000014C0FFFB24840004D0
++:100BB00003E000080000000010A0000824A3FFFFCD
++:100BC000AC86000000000000000000002402FFFFCF
++:100BD0002463FFFF1462FFFA2484000403E000088A
++:100BE000000000003C03800027BDFFF83462018054
++:100BF000AFA20000308C00FF30AD00FF30CE00FF10
++:100C00003C0B80008D6401B80480FFFE00000000F2
++:100C10008FA900008D6801288FAA00008FA700000F
++:100C20008FA400002405000124020002A085000A10
++:100C30008FA30000359940003C051000A062000B16
++:100C40008FB800008FAC00008FA600008FAF0000AF
++:100C500027BD0008AD280000AD400004AD80002491
++:100C6000ACC00028A4F90008A70D0010A5EE0012E2
++:100C700003E00008AD6501B83C06800827BDFFE829
++:100C800034C50080AFBF001090A7000924020012F5
++:100C900030E300FF1062000B008030218CA8005070
++:100CA00000882023048000088FBF00108CAA003425
++:100CB000240400390000282100CA4823052000052B
++:100CC000240600128FBF00102402000103E0000878
++:100CD00027BD00180E0016F2000000008FBF0010A4
++:100CE0002402000103E0000827BD001827BDFFC84B
++:100CF000AFB20030AFB00028AFBF0034AFB1002CAE
++:100D000000A0802190A5000D30A6001010C000109A
++:100D1000008090213C0280088C4400048E0300086F
++:100D20001064000C30A7000530A6000510C0009329
++:100D3000240400018FBF00348FB200308FB1002C2B
++:100D40008FB000280080102103E0000827BD003884
++:100D500030A7000510E0000F30AB001210C00006F5
++:100D6000240400013C0980088E0800088D25000439
++:100D70005105009C240400388FBF00348FB200302E
++:100D80008FB1002C8FB000280080102103E00008F4
++:100D900027BD0038240A0012156AFFE6240400016A
++:100DA0000200202127A500100E000CB6AFA00010F5
++:100DB0001440007C3C19800837240080909800087B
++:100DC000331100081220000A8FA7001030FF010025
++:100DD00013E000A48FA300148C8600580066102333
++:100DE000044000043C0A8008AC8300588FA7001020
++:100DF0003C0A800835480080910900083124000829
++:100E00001480000224080003000040213C1F8008D9
++:100E100093F1001193F9001237E600808CCC005456
++:100E2000333800FF03087821322D00FF000F708057
++:100E300001AE282100AC582B1160006F00000000AB
++:100E400094CA005C8CC900543144FFFF0125102373
++:100E50000082182B14600068000000008CCB005446
++:100E60000165182330EC00041180006C000830800C
++:100E70008FA8001C0068102B1040006230ED0004A9
++:100E8000006610232C46008010C00002004088211C
++:100E9000241100800E00164E024020213C0D8008D7
++:100EA00035A6008024070001ACC7000C90C80008DC
++:100EB0000011484035A70100310C007FA0CC00088C
++:100EC0008E05000424AB0001ACCB0030A4D1005C43
++:100ED0008CCA003C9602000E01422021ACC40020C6
++:100EE0008CC3003C0069F821ACDF001C8E190004A3
++:100EF000ACF900008E180008ACF800048FB10010A7
++:100F0000322F000855E0004793A60020A0C0004EF5
++:100F100090D8004E2411FFDFA0F8000890CF000801
++:100F200001F17024A0CE00088E0500083C0B80085B
++:100F300035690080AD2500388D6A00148D2200309F
++:100F40002419005001422021AD24003491230000D7
++:100F5000307F00FF13F90036264F01000E001658AF
++:100F60000240202124040038000028210E0016F23F
++:100F70002406000A0A001757240400010E000D2859
++:100F8000000020218FBF00348FB200308FB1002CC1
++:100F90008FB00028004020210080102103E00008CD
++:100FA00027BD00388E0E00083C0F800835F0008009
++:100FB000AE0E005402402021AE0000300E00164E4E
++:100FC00000000000920D00250240202135AC0020D9
++:100FD0000E001658A20C00250E000CAC0240202179
++:100FE000240400382405008D0E0016F22406001299
++:100FF0000A0017572404000194C5005C0A001792E8
++:1010000030A3FFFF2407021811A0FF9E00E6102363
++:101010008FAE001C0A00179A01C610230A0017970A
++:101020002C620218A0E600080A0017C48E0500080A
++:101030002406FF8001E6C0243C118000AE38002861
++:101040008E0D000831E7007F3C0E800C00EE602121
++:10105000AD8D00E08E080008AF8C00380A0017D074
++:10106000AD8800E4AC800058908500082403FFF7A9
++:1010700000A33824A08700080A0017758FA7001066
++:101080003C05080024A560A83C04080024846FF4F3
++:101090003C020800244260B0240300063C01080121
++:1010A000AC2596C03C010801AC2496C43C01080163
++:1010B000AC2296C83C010801A02396CC03E00008AE
++:1010C0000000000003E00008240200013C02800050
++:1010D000308800FF344701803C0680008CC301B893
++:1010E0000460FFFE000000008CC501282418FF806A
++:1010F0003C0D800A24AF010001F8702431EC007F20
++:10110000ACCE0024018D2021ACE50000948B00EAD8
++:101110003509600024080002316AFFFFACEA0004D0
++:1011200024020001A4E90008A0E8000BACE00024C0
++:101130003C071000ACC701B8AF84003803E00008DA
++:10114000AF85006C938800488F8900608F820038DB
++:1011500030C600FF0109382330E900FF01221821C1
++:1011600030A500FF2468008810C000020124382147
++:101170000080382130E400031480000330AA00030B
++:101180001140000D312B000310A0000900001021B8
++:1011900090ED0000244E000131C200FF0045602B9D
++:1011A000A10D000024E700011580FFF925080001CA
++:1011B00003E00008000000001560FFF300000000DD
++:1011C00010A0FFFB000010218CF80000245900043F
++:1011D000332200FF0045782BAD18000024E70004FF
++:1011E00015E0FFF92508000403E0000800000000F6
++:1011F00093850048938800588F8700600004320070
++:101200003103007F00E5102B30C47F001040000F39
++:10121000006428258F8400383C0980008C8A00EC0B
++:10122000AD2A00A43C03800000A35825AC6B00A0AD
++:101230008C6C00A00580FFFE000000008C6D00ACEF
++:10124000AC8D00EC03E000088C6200A80A00188254
++:101250008F840038938800593C0280000080502120
++:10126000310300FEA383005930ABFFFF30CC00FFF9
++:1012700030E7FFFF344801803C0980008D2401B82D
++:101280000480FFFE8F8D006C24180016AD0D000049
++:101290008D2201248F8D0038AD0200048D5900206D
++:1012A000A5070008240201C4A119000AA118000B17
++:1012B000952F01208D4E00088D4700049783005C18
++:1012C0008D59002401CF302100C7282100A32023FD
++:1012D0002418FFFFA504000CA50B000EA5020010AA
++:1012E000A50C0012AD190018AD18002495AF00E848
++:1012F0003C0B10002407FFF731EEFFFFAD0E002876
++:101300008DAC0084AD0C002CAD2B01B88D460020B7
++:1013100000C7282403E00008AD4500208F8800386E
++:101320000080582130E7FFFF910900D63C02800081
++:1013300030A5FFFF312400FF00041A00006750258C
++:1013400030C600FF344701803C0980008D2C01B875
++:101350000580FFFE8F82006C240F0017ACE20000B6
++:101360008D390124ACF900048D780020A4EA00082E
++:10137000241901C4A0F8000AA0EF000B9523012056
++:101380008D6E00088D6D00049784005C01C35021B0
++:10139000014D602101841023A4E2000CA4E5000E9D
++:1013A000A4F90010A4E60012ACE000148D7800242B
++:1013B000240DFFFFACF800188D0F007CACEF001C73
++:1013C0008D0E00783C0F1000ACEE0020ACED002438
++:1013D000950A00BE240DFFF73146FFFFACE600285A
++:1013E000950C00809504008231837FFF0003CA00C2
++:1013F0003082FFFF0322C021ACF8002CAD2F01B8D2
++:10140000950E00828D6A002000AE3021014D282407
++:10141000A506008203E00008AD6500203C028000C4
++:10142000344501803C0480008C8301B80460FFFED9
++:101430008F8A0044240600199549001C3128FFFFBB
++:10144000000839C0ACA70000A0A6000B3C051000A6
++:1014500003E00008AC8501B88F87004C0080402174
++:1014600030C400FF3C0680008CC201B80440FFFE7F
++:101470008F89006C9383006834996000ACA90000E8
++:10148000A0A300058CE20010240F00022403FFF744
++:10149000A4A20006A4B900088D180020A0B8000A74
++:1014A000A0AF000B8CEE0000ACAE00108CED000481
++:1014B000ACAD00148CEC001CACAC00248CEB002018
++:1014C000ACAB00288CEA002C3C071000ACAA002C26
++:1014D0008D090024ACA90018ACC701B88D05002007
++:1014E00000A3202403E00008AD0400208F8600380C
++:1014F00027BDFFE0AFB10014AFBF0018AFB00010C0
++:1015000090C300D430A500FF3062002010400008D6
++:10151000008088218CCB00D02409FFDF256A0001E0
++:10152000ACCA00D090C800D401093824A0C700D4A8
++:1015300014A000403C0C80008F840038908700D4B9
++:101540002418FFBF2406FFEF30E3007FA08300D400
++:10155000979F005C8F8200608F8D003803E2C82364
++:10156000A799005CA5A000BC91AF00D401F870243D
++:10157000A1AE00D48F8C0038A18000D78F8A0038AC
++:10158000A5400082AD4000EC914500D400A658244F
++:10159000A14B00D48F9000348F8400609786005C4C
++:1015A0000204282110C0000FAF850034A38000582A
++:1015B0003C0780008E2C000894ED01208E2B000447
++:1015C000018D5021014B8021020620233086FFFF30
++:1015D00030C8000F3909000131310001162000091F
++:1015E000A3880058938600488FBF00188FB100145D
++:1015F0008FB0001027BD0020AF85006403E0000815
++:10160000AF86006000C870238FBF00189386004823
++:101610008FB100148FB0001034EF0C00010F28219F
++:1016200027BD0020ACEE0084AF85006403E0000815
++:10163000AF86006035900180020028210E00190F4E
++:10164000240600828F840038908600D430C5004084
++:1016500050A0FFBAA38000688F85004C3C06800034
++:101660008CCD01B805A0FFFE8F89006C2408608234
++:1016700024070002AE090000A6080008A207000B1C
++:101680008CA300083C0E1000AE0300108CA2000CCE
++:10169000AE0200148CBF0014AE1F00188CB90018E5
++:1016A000AE1900248CB80024AE1800288CAF002896
++:1016B000AE0F002CACCE01B80A001948A380006818
++:1016C0008F8A003827BDFFE0AFB10014AFB0001023
++:1016D0008F880060AFBF00189389003C954200BC22
++:1016E00030D100FF0109182B0080802130AC00FFB1
++:1016F0003047FFFF0000582114600003310600FF4F
++:1017000001203021010958239783005C0068202BB9
++:101710001480002700000000106800562419000102
++:101720001199006334E708803165FFFF0E0018C08F
++:10173000020020218F83006C3C07800034E601808A
++:101740003C0580008CAB01B80560FFFE240A001840
++:101750008F840038ACC30000A0CA000B948900BE7F
++:101760003C081000A4C90010ACC00030ACA801B8FF
++:101770009482008024430001A4830080949F008011
++:101780003C0608008CC6318833EC7FFF1186005E72
++:101790000000000002002021022028218FBF001835
++:1017A0008FB100148FB000100A00193427BD00203B
++:1017B000914400D42403FF8000838825A15100D4E4
++:1017C0009784005C3088FFFF51000023938C003C1D
++:1017D0008F8500382402EFFF008B782394AE00BC85
++:1017E0000168502B31E900FF01C26824A4AD00BCA0
++:1017F00051400039010058213C1F800037E60100AC
++:101800008CD800043C190001031940245500000144
++:1018100034E740008E0A00202403FFFB241100015E
++:1018200001432024AE0400201191002D34E78000F4
++:1018300002002021012030210E0018C03165FFFF79
++:101840009787005C8F890060A780005C0127802358
++:10185000AF900060938C003C8F8B00388FBF0018D6
++:101860008FB100148FB0001027BD002003E00008E6
++:10187000A16C00D73C0D800035AA01008D48000402
++:101880003C0900010109282454A0000134E740006C
++:101890008E0F00202418FFFB34E7800001F870242D
++:1018A00024190001AE0E00201599FF9F34E708802F
++:1018B000020020210E00188E3165FFFF020020215A
++:1018C000022028218FBF00188FB100148FB00010A4
++:1018D0000A00193427BD00200A0019F7000048212A
++:1018E00002002021012030210E00188E3165FFFFFB
++:1018F0009787005C8F890060A780005C01278023A8
++:101900000A001A0EAF900060948C0080241F8000A3
++:10191000019F3024A4860080908B0080908F0080EF
++:10192000316700FF0007C9C20019C027001871C045
++:1019300031ED007F01AE2825A08500800A0019DF67
++:1019400002002021938500682403000127BDFFE8E1
++:1019500000A330042CA20020AFB00010AFBF0014D1
++:1019600000C01821104000132410FFFE3C0708009F
++:101970008CE7319000E610243C088000350501809A
++:1019800014400005240600848F890038240A0004CE
++:101990002410FFFFA12A00FC0E00190F0000000018
++:1019A000020010218FBF00148FB0001003E0000868
++:1019B00027BD00183C0608008CC631940A001A574F
++:1019C00000C310248F87004427BDFFE0AFB200188A
++:1019D000AFB10014AFB00010AFBF001C30D000FF9B
++:1019E00090E6000D00A088210080902130C5007F86
++:1019F000A0E5000D8F8500388E2300188CA200D042
++:101A00001062002E240A000E0E001A4AA38A0068F3
++:101A10002409FFFF104900222404FFFF5200002088
++:101A2000000020218E2600003C0C001000CC582421
++:101A3000156000393C0E000800CE682455A0003F18
++:101A4000024020213C18000200D880241200001F10
++:101A50003C0A00048F8700448CE200148CE30010E1
++:101A60008CE500140043F82303E5C82B1320000580
++:101A7000024020218E24002C8CF1001010910031A6
++:101A80000240202124020012A38200680E001A4A9C
++:101A90002412FFFF105200022404FFFF0000202147
++:101AA0008FBF001C8FB200188FB100148FB00010D0
++:101AB0000080102103E0000827BD002090A800D47A
++:101AC000350400200A001A80A0A400D400CA4824CB
++:101AD0001520000B8F8B00448F8D00448DAC0010BF
++:101AE0001580000B024020218E2E002C51C0FFECEF
++:101AF00000002021024020210A001A9B2402001726
++:101B00008D66001050C0FFE6000020210240202119
++:101B10000A001A9B24020011024020212402001511
++:101B20000E001A4AA3820068240FFFFF104FFFDC4B
++:101B30002404FFFF0A001A8A8E2600000A001AC138
++:101B4000240200143C08000400C8382450E0FFD4EC
++:101B500000002021024020210A001A9B24020013C9
++:101B60008F85003827BDFFD8AFB3001CAFB2001877
++:101B7000AFB10014AFB00010AFBF002090A700D4E9
++:101B80008F90004C2412FFFF34E2004092060000C8
++:101B9000A0A200D48E0300100080982110720006CD
++:101BA00030D1003F2408000D0E001A4AA3880068B7
++:101BB000105200252404FFFF8F8A00388E09001878
++:101BC0008D4400D01124000702602021240C000E57
++:101BD0000E001A4AA38C0068240BFFFF104B001A5A
++:101BE0002404FFFF24040020122400048F8D0038F9
++:101BF00091AF00D435EE0020A1AE00D48F85005403
++:101C000010A00019000000001224004A8F9800382C
++:101C10008F92FCC0971000809651000A5230004805
++:101C20008F9300403C1F08008FFF318C03E5C82BC9
++:101C30001720001E02602021000028210E0019A993
++:101C400024060001000020218FBF00208FB3001C5C
++:101C50008FB200188FB100148FB0001000801021D7
++:101C600003E0000827BD00285224002A8E05001436
++:101C70008F840038948A008025490001A48900805F
++:101C8000948800803C0208008C42318831077FFF35
++:101C900010E2000E00000000026020210E00193446
++:101CA000240500010A001B0B000020212402002D46
++:101CB0000E001A4AA38200682403FFFF1443FFE1C9
++:101CC0002404FFFF0A001B0C8FBF002094990080A2
++:101CD000241F800024050001033FC024A498008035
++:101CE00090920080908E0080325100FF001181C2DE
++:101CF00000107827000F69C031CC007F018D582576
++:101D0000A08B00800E001934026020210A001B0BFA
++:101D1000000020212406FFFF54A6FFD68F84003840
++:101D2000026020210E001934240500010A001B0B5B
++:101D300000002021026020210A001B252402000A45
++:101D40002404FFFD0A001B0BAF9300608F8800384E
++:101D500027BDFFE8AFB00010AFBF0014910A00D458
++:101D60008F87004C00808021354900408CE60010B0
++:101D7000A10900D43C0208008C4231B030C53FFFBD
++:101D800000A2182B106000078F850050240DFF80E3
++:101D900090AE000D01AE6024318B00FF156000088D
++:101DA0000006C382020020212403000D8FBF00140F
++:101DB0008FB0001027BD00180A001A4AA3830068DC
++:101DC00033060003240F000254CFFFF70200202146
++:101DD00094A2001C8F85003824190023A4A200E8D7
++:101DE0008CE8000000081E02307F003F13F9003528
++:101DF0003C0A00838CE800188CA600D0110600086D
++:101E0000000000002405000E0E001A4AA385006899
++:101E10002407FFFF104700182404FFFF8F850038B8
++:101E200090A900D435240020A0A400D48F8C0044B5
++:101E3000918E000D31CD007FA18D000D8F83005458
++:101E40001060001C020020218F8400508C9800102C
++:101E50000303782B11E0000D241900180200202143
++:101E6000A39900680E001A4A2410FFFF10500002C8
++:101E70002404FFFF000020218FBF00148FB000104A
++:101E80000080102103E0000827BD00188C86001098
++:101E90008F9F00440200202100C31023AFE20010F6
++:101EA000240500010E0019A9240600010A001B9751
++:101EB000000020210E001934240500010A001B97A0
++:101EC00000002021010A5824156AFFD98F8C004494
++:101ED000A0A600FC0A001B84A386005A30A500FFC0
++:101EE0002406000124A9000100C9102B1040000C99
++:101EF00000004021240A000100A61823308B0001B5
++:101F000024C60001006A3804000420421160000267
++:101F100000C9182B010740251460FFF800A61823FC
++:101F200003E000080100102127BDFFD8AFB0001862
++:101F30008F90004CAFB1001CAFBF00202403FFFF07
++:101F40002411002FAFA30010920600002405000802
++:101F500026100001006620260E001BB0308400FF12
++:101F600000021E003C021EDC34466F410A001BD8F2
++:101F70000000102110A00009008018212445000154
++:101F800030A2FFFF2C4500080461FFFA0003204047
++:101F90000086202614A0FFF9008018210E001BB037
++:101FA000240500208FA300102629FFFF313100FFF8
++:101FB00000034202240700FF1627FFE20102182651
++:101FC00000035027AFAA0014AFAA00100000302170
++:101FD00027A8001027A7001400E6782391ED00033E
++:101FE00024CE000100C8602131C600FF2CCB0004C4
++:101FF0001560FFF9A18D00008FA200108FBF002097
++:102000008FB1001C8FB0001803E0000827BD002826
++:1020100027BDFFD0AFB3001CAFB00010AFBF00288A
++:10202000AFB50024AFB40020AFB20018AFB10014B8
++:102030003C0C80008D880128240FFF803C06800A1C
++:1020400025100100250B0080020F68243205007F57
++:10205000016F7024AD8E009000A62821AD8D002464
++:1020600090A600FC3169007F3C0A8004012A1821F7
++:10207000A386005A9067007C00809821AF830030CF
++:1020800030E20002AF88006CAF85003800A0182154
++:10209000144000022404003424040030A3840048C7
++:1020A0008C7200DC30D100FF24040004AF92006089
++:1020B00012240004A38000688E7400041680001EA1
++:1020C0003C0880009386005930C7000110E0000FE3
++:1020D0008F9300608CB000848CA800842404FF805F
++:1020E000020410240002F940310A007F03EA482567
++:1020F0003C0C2000012C902530CD00FE3C038000DC
++:10210000AC720830A38D00598F9300608FBF0028F8
++:102110008FB50024ACB300DC8FB400208FB3001C5B
++:102120008FB200188FB100148FB00010240200018C
++:1021300003E0000827BD00308E7F000895020120D3
++:102140008E67001003E2C8213326FFFF30D8000F4E
++:1021500033150001AF87003416A00058A39800582B
++:1021600035090C000309382100D81823AD03008479
++:10217000AF8700648E6A00043148FFFF1100007EC3
++:10218000A78A005C90AC00D42407FF8000EC3024C8
++:1021900030CB00FF1560004B9786005C938E005A91
++:1021A000240D000230D5FFFF11CD02A20000A021B6
++:1021B0008F85006002A5802B160000BC9388004824
++:1021C0003C11800096240120310400FF1485008812
++:1021D0008F8400648F9800343312000356400085CA
++:1021E00030A500FF8F900064310C00FF24060034FE
++:1021F00011860095AF90004C9204000414800118E1
++:102200008F8E0038A380003C8E0D00048DC800D84E
++:102210003C0600FF34CCFFFF01AC30240106182B34
++:1022200014600120AF8600548F8700609798005C8F
++:10223000AF8700400307402310C000C7A788005C99
++:102240008F91003030C3000300035823922A007C92
++:102250003171000302261021000A20823092000111
++:102260000012488000492821311FFFFF03E5C82BD9
++:10227000132001208F8800388F8500348F880064F8
++:102280001105025A3C0E3F018E0600003C0C250051
++:1022900000CE682411AC01638F84004C30E500FF50
++:1022A0000E00184A000030218F8800388F870060A8
++:1022B0008F8500340A001DB78F8600540A001C5613
++:1022C000AF87006490A400D400E48024320200FFB1
++:1022D000104000169386005990A6008890AE00D753
++:1022E00024A8008830D4003F2686FFE02CD10020AF
++:1022F000A38E003C1220000CAF88004C240B000180
++:1023000000CB20043095001916A0012B3C0680005C
++:1023100034CF0002008FC0241700022E3099002015
++:1023200017200234000000009386005930CB0001D2
++:102330001160000F9788005C8CBF00848CA900841A
++:10234000240AFF8003EA6024000C19403132007F28
++:10235000007238253C0D200000EDC82530D800FE65
++:102360003C0F8000ADF90830A39800599788005CB5
++:102370001500FF84000000008E630020306200041E
++:102380001040FF51938600592404FFFB0064802411
++:102390003C038000AE700020346601808C7301B86D
++:1023A0000660FFFE8F98006C347501003C1400013C
++:1023B000ACD800008C6B012424076085ACCB0004F2
++:1023C0008EAE000401D488245220000124076083CB
++:1023D00024190002A4C700083C0F1000A0D9000B6C
++:1023E0003C068000ACCF01B80A001C2B9386005934
++:1023F00030A500FF0E00184A240600018F88006CEB
++:102400003C05800034A90900250201889388004812
++:10241000304A0007304B00783C0340802407FF809F
++:102420000163C825014980210047F824310C00FFD1
++:1024300024060034ACBF0800AF90004CACB90810C3
++:102440005586FF6E920400048F8400388E11003090
++:10245000908E00D431CD001015A000108F83006045
++:102460002C6F000515E000E400000000909800D4F7
++:102470002465FFFC331200101640000830A400FF52
++:102480008F9F00648F99003413F90004388700018E
++:1024900030E20001144001C8000000000E001BC320
++:1024A000000000000A001DF8000000008F84006496
++:1024B00030C500FF0E00184A24060001939800481A
++:1024C000240B0034130B00A08F8500388F8600602A
++:1024D0009783005C306EFFFF00CE8823AF910060D1
++:1024E000A780005C1280FF90028018212414FFFD59
++:1024F0005474FFA28E6300208E6A00042403FFBF81
++:102500002408FFEF0155F823AE7F000490AC00D4FF
++:102510003189007FA0A900D48E7200208F8F0038EF
++:10252000A780005C364D0002AE6D0020A5E000BC27
++:1025300091E500D400A3C824A1F900D48F950038F8
++:10254000AEA000EC92B800D403085824A2AB00D48B
++:102550000A001CD78F8500388F910034AF8000604F
++:1025600002275821AF8B0034000020212403FFFFF5
++:10257000108301B48F8500388E0C00103C0D0800CC
++:102580008DAD31B09208000031843FFF008D802B6B
++:1025900012000023310D003F3C1908008F3931A88B
++:1025A0008F9F006C000479802408FF80033F202166
++:1025B000008FC821938500590328F8243C06008029
++:1025C0003C0F800034D80001001F91403331007F60
++:1025D0008F8600380251502535EE0940332B0078A4
++:1025E000333000073C0310003C02800C017890253A
++:1025F000020E48210143C0250222382134AE0001D9
++:10260000ADFF0804AF890050ADF20814AF87004455
++:10261000ADFF0028ACD90084ADF80830A38E005976
++:102620009383005A24070003106700272407000142
++:102630001467FFAC8F8500382411002311B1008589
++:1026400000000000240E000B026020210E001A4A38
++:10265000A38E00680040A0210A001D328F8500383B
++:1026600002602021240B000C0E001A4AA38B006884
++:10267000240AFFFF104AFFBD2404FFFF8F8E00389D
++:10268000A380003C8E0D00048DC800D83C0600FFDE
++:1026900034CCFFFF01AC30240106182B1060FEE2A1
++:1026A000AF86005402602021241200190E001A4A3D
++:1026B000A3920068240FFFFF104FFFAC2404FFFF1C
++:1026C0000A001C838F86005425A3FFE02C74002091
++:1026D0001280FFDD240E000B000328803C1108014E
++:1026E000263194B400B148218D2D000001A00008CE
++:1026F000000000008F85003400A710219385003C66
++:10270000AF82003402251821A383003C951F00BC32
++:102710000226282137F91000A51900BC5240FF926B
++:10272000AF850060246A0004A38A003C950900BCC0
++:1027300024A40004AF84006035322000A51200BC40
++:102740000A001D54000020218F8600602CC800055F
++:102750001500FF609783005C3065FFFF00C5C8234C
++:102760002F2F000511E00003306400FF24CDFFFC93
++:1027700031A400FF8F8900648F920034113200046D
++:10278000389F000133EC0001158001380000000083
++:102790008F840038908700D434E60010A08600D4DF
++:1027A0008F8500388F8600609783005CACA000ECBA
++:1027B0000A001D2F306EFFFF8CB500848CB400849E
++:1027C0003C04100002A7302400068940328E007FAE
++:1027D000022E8025020410253C08800024050001FB
++:1027E00002602021240600010E0019A9AD02083064
++:1027F0000A001CC38F8500388C8200EC1222FE7EFA
++:102800000260202124090005A38900680E001A4AED
++:102810002411FFFF1451FE782404FFFF0A001D5508
++:102820002403FFFF8F8F004C8F8800388DF8000045
++:10283000AD1800888DE70010AD0700988F87006005
++:102840000A001DB78F8600542406FFFF118600057D
++:10285000000000000E001B4C026020210A001D8FAA
++:102860000040A0210E001AD1026020210A001D8F15
++:102870000040A0218F90004C3C0208008C4231B0F7
++:102880008E110010322C3FFF0182282B10A0000C6B
++:10289000240BFF808F85005090A3000D01637024EE
++:1028A00031CA00FF1140000702602021001143825D
++:1028B000310600032418000110D8010600000000B2
++:1028C000026020212403000D0E001A4AA383006831
++:1028D000004020218F8500380A001D320080A02191
++:1028E0008F90004C3C0A08008D4A31B08F85005013
++:1028F0008E0400100000A0218CB1001430823FFF34
++:10290000004A602B8CB200205180FFEE0260202133
++:1029100090B8000D240BFF800178702431C300FFB4
++:102920005060FFE80260202100044382310600036A
++:1029300014C0FFE40260202194BF001C8F9900386E
++:102940008E060028A73F00E88CAF0010022F20233E
++:1029500014C4013A026020218F83005400C368210F
++:10296000022D382B14E00136240200188F8A00440F
++:102970008F820030024390218D4B00100163702341
++:10298000AD4E0010AD5200208C4C00740192282BEB
++:1029900014A0015F026020218F8400508E08002463
++:1029A0008C86002411060007026020212419001CD7
++:1029B0000E001A4AA3990068240FFFFF104FFFC5AD
++:1029C0002404FFFF8F8400448C87002424FF00012F
++:1029D000AC9F00241251012F8F8D00308DB10074F7
++:1029E0001232012C3C0B00808E0E000001CB5024D3
++:1029F00015400075000000008E0300142411FFFF35
++:102A0000107100073C0808003C0608008CC6319095
++:102A100000C8C0241300015202602021A380006876
++:102A20008E0300003C19000100792024108000135F
++:102A30003C1F0080007FA02416800009020028218E
++:102A4000026020212411001A0E001A4AA391006886
++:102A50002407FFFF1047FF9F2404FFFF02002821E7
++:102A6000026020210E001A6A240600012410FFFFD4
++:102A70001050FF982404FFFF241400018F8D0044A0
++:102A8000026020210280302195A900342405000134
++:102A9000253200010E0019A9A5B200340000202142
++:102AA0008F8500380A001D320080A0218F90004CD5
++:102AB0003C1408008E9431B08E07001030E53FFFC3
++:102AC00000B4C82B132000618F8600502412FF80B1
++:102AD00090C9000D0249682431A400FF5080005CB9
++:102AE000026020218F8C00541180000700078B8228
++:102AF0008F8500388F82FCC094BF0080944A000A02
++:102B0000515F00F78F8600403227000314E0006415
++:102B100000000000920E000211C000D8000000006A
++:102B20008E0B0024156000D902602021920400035E
++:102B300024190002308500FF14B90005308900FF18
++:102B40008F940054128000EA240D002C308900FF7D
++:102B5000392C00102D8400012D3200010244302553
++:102B6000020028210E001A6A026020212410FFFFB3
++:102B7000105000BF8F8500388F830054106000D341
++:102B8000240500013C0A08008D4A318C0143F82BD2
++:102B900017E000B22402002D02602021000028214D
++:102BA0000E0019A9240600018F85003800001821A5
++:102BB0000A001D320060A0210E0018750000000000
++:102BC0000A001DF800000000AC8000200A001E78FA
++:102BD0008E03001400002821026020210E0019A994
++:102BE000240600010A001CC38F8500380A001DB7A7
++:102BF0008F8800388CAA00848CAC00843C031000C1
++:102C00000147F824001F91403189007F024968255F
++:102C100001A32825ACC50830910700012405000157
++:102C2000026020210E0019A930E600010A001CC331
++:102C30008F850038938F00482403FFFD0A001D3460
++:102C4000AF8F00600A001D342403FFFF02602021C3
++:102C50002410000D0E001A4AA390006800401821AD
++:102C60008F8500380A001D320060A0210E00187503
++:102C7000000000009783005C8F86006000402021E8
++:102C80003070FFFF00D010232C4A00051140FE11C8
++:102C90008F850038ACA400EC0A001D2F306EFFFFBA
++:102CA00090CF000D31E300085460FFA192040003AF
++:102CB00002602021240200100E001A4AA38200683C
++:102CC0002403FFFF5443FF9A920400030A001F12DB
++:102CD0008F85003890A4000D308F000811E000951A
++:102CE0008F990054572000A6026020218E1F000CEF
++:102CF0008CB4002057F40005026020218E0D0008DE
++:102D00008CA7002411A7003A026020212402002091
++:102D1000A38200680E001A4A2412FFFF1052FEED33
++:102D20002404FFFF8F9F00442402FFF73C14800E11
++:102D300093EA000D2419FF803C03800001423824EF
++:102D4000A3E7000D8F9F00303C0908008D2931ACAE
++:102D50008F8C006C97F200788F870044012C302113
++:102D6000324D7FFF000D204000C4782131E5007F07
++:102D700000B4C02101F94024AC68002CA711000068
++:102D80008CEB0028256E0001ACEE00288CEA002CAC
++:102D90008E02002C01426021ACEC002C8E09002C2C
++:102DA000ACE900308E120014ACF2003494ED003A1D
++:102DB00025A40001A4E4003A97E600783C1108003D
++:102DC0008E3131B024C3000130707FFF1211005CDE
++:102DD000006030218F8F0030026020212405000127
++:102DE0000E001934A5E600780A001EA1000020217B
++:102DF0008E0900142412FFFF1132006B8F8A0038F5
++:102E00008E0200188D4C00D0144C00650260202109
++:102E10008E0B00248CAE0028116E005B2402002172
++:102E20000E001A4AA38200681452FFBE2404FFFF5A
++:102E30008F8500380A001D320080A0212402001F67
++:102E40000E001A4AA38200682409FFFF1049FEA160
++:102E50002404FFFF0A001E548F83005402602021C7
++:102E60000E001A4AA38200681450FF508F85003864
++:102E70002403FFFF0A001D320060A0218CD800242B
++:102E80008E0800241118FF29026020210A001F2744
++:102E90002402000F8E0900003C05008001259024CB
++:102EA0001640FF492402001A026020210E001A4A2F
++:102EB000A3820068240CFFFF144CFECF2404FFFF04
++:102EC0008F8500380A001D320080A0210E001934C1
++:102ED000026020218F8500380A001EE500001821BD
++:102EE0002403FFFD0060A0210A001D32AF860060B0
++:102EF000026020210E001A4AA38D00682403FFFF00
++:102F00001043FF588F8500380A001ECC920400033E
++:102F10002418001D0E001A4AA39800682403FFFF1E
++:102F20001443FE9D2404FFFF8F8500380A001D32E4
++:102F30000080A021026020210A001F3D24020024FD
++:102F4000240880000068C024330BFFFF000B73C20D
++:102F500031D000FF001088270A001F6E001133C017
++:102F6000240F001B0E001A4AA38F00681451FEACF8
++:102F70002404FFFF8F8500380A001D320080A02145
++:102F80000A001F3D240200278E0600288CA3002C77
++:102F900010C30008026020210A001F812402001FC4
++:102FA0000A001F812402000E026020210A001F81F6
++:102FB000240200258E04002C1080000D8F8F00301D
++:102FC0008DE800740104C02B5700000C0260202122
++:102FD0008CB900140086A0210334282B10A0FF52C6
++:102FE0008F9F0044026020210A001F8124020022DA
++:102FF000026020210A001F81240200230A001F8191
++:103000002402002627BDFFD8AFB3001CAFB10014C7
++:10301000AFBF0020AFB20018AFB000103C0280007C
++:103020008C5201408C4B01483C048000000B8C0208
++:10303000322300FF317300FF8C8501B804A0FFFE2E
++:1030400034900180AE1200008C8701442464FFF0AC
++:10305000240600022C830013AE070004A61100080A
++:10306000A206000BAE1300241060004F8FBF00209B
++:10307000000448803C0A0801254A9534012A402171
++:103080008D04000000800008000000003C030800E0
++:103090008C6331A831693FFF00099980007280215B
++:1030A000021370212405FF80264D0100264C00806C
++:1030B0003C02800031B1007F3198007F31CA007F2F
++:1030C0003C1F800A3C1980043C0F800C01C5202461
++:1030D00001A5302401853824014F1821AC46002475
++:1030E000023F402103194821AC470090AC4400281E
++:1030F000AF830044AF880038AF8900300E0019005C
++:10310000016080213C0380008C6B01B80560FFFEEC
++:103110008F8700448F8600383465018090E8000D69
++:10312000ACB20000A4B0000600082600000416039C
++:1031300000029027001227C21080008124C200885C
++:10314000241F6082A4BF0008A0A000052402000282
++:10315000A0A2000B8F8B0030000424003C08270045
++:1031600000889025ACB20010ACA00014ACA00024E4
++:10317000ACA00028ACA0002C8D6900382413FF807F
++:10318000ACA9001890E3000D02638024320500FF13
++:1031900010A000058FBF002090ED000D31AC007F26
++:1031A000A0EC000D8FBF00208FB3001C8FB2001861
++:1031B0008FB100148FB000103C0A10003C0E80004C
++:1031C00027BD002803E00008ADCA01B8265F010052
++:1031D0002405FF8033F8007F3C06800003E5782457
++:1031E0003C19800A03192021ACCF0024908E00D412
++:1031F00000AE682431AC00FF11800024AF84003899
++:10320000248E008895CD00123C0C08008D8C31A8CE
++:1032100031AB3FFF01924821000B5180012A402130
++:1032200001052024ACC400283107007F3C06800C37
++:1032300000E620219083000D00A31024304500FFFC
++:1032400010A0FFD8AF8400449098000D330F0010F9
++:1032500015E0FFD58FBF00200E0019000000000010
++:103260003C0380008C7901B80720FFFE00000000BD
++:10327000AE1200008C7F0144AE1F0004A6110008AE
++:1032800024110002A211000BAE1300243C1308010C
++:10329000927396F0327000015200FFC38FBF00207E
++:1032A0000E002146024020210A0020638FBF00202B
++:1032B0003C1260008E452C083C03F0033462FFFF93
++:1032C00000A2F824AE5F2C088E582C083C1901C0CF
++:1032D00003199825AE532C080A0020638FBF0020E5
++:1032E000264D010031AF007F3C10800A240EFF8084
++:1032F00001F0282101AE60243C0B8000AD6C00245D
++:103300001660FFA8AF85003824110003A0B100FCAF
++:103310000A0020638FBF002026480100310A007F89
++:103320003C0B800A2409FF80014B30210109202435
++:103330003C078000ACE400240A002062AF8600381D
++:10334000944E0012320C3FFF31CD3FFF15ACFF7D94
++:10335000241F608290D900D42418FF800319782498
++:1033600031EA00FF1140FF7700000000240700044D
++:10337000A0C700FC8F870044241160842406000D40
++:10338000A4B10008A0A600050A00204D24020002F6
++:103390003C040001248496DC24030014240200FE73
++:1033A0003C010800AC2431EC3C010800AC2331E8BE
++:1033B0003C010801A42296F83C040801248496F8F4
++:1033C0000000182100643021A0C300042463000120
++:1033D0002C6500FF54A0FFFC006430213C0708006E
++:1033E00024E7010003E00008AF87007800A058211F
++:1033F000008048210000102114A00012000050217C
++:103400000A002142000000003C010801A42096F8B7
++:103410003C05080194A596F88F8200783C0C0801C1
++:10342000258C96F800E2182100AC2021014B302BAE
++:10343000A089000400001021A460000810C0003919
++:10344000010048218F8600780009384000E94021BA
++:103450000008388000E6282190A8000B90B9000AE7
++:103460000008204000881021000218800066C0215A
++:10347000A319000A8F85007800E5782191EE000AF3
++:1034800091E6000B000E684001AE6021000C208028
++:1034900000851021A046000B3C030801906396F2C2
++:1034A000106000222462FFFF8F8300383C01080176
++:1034B000A02296F2906C00FF118000040000000032
++:1034C000906E00FF25CDFFFFA06D00FF3C190801A5
++:1034D000973996F8272300013078FFFF2F0F00FF60
++:1034E00011E0FFC9254A00013C010801A42396F818
++:1034F0003C05080194A596F88F8200783C0C0801E1
++:10350000258C96F800E2182100AC2021014B302BCD
++:10351000A089000400001021A460000814C0FFC9A5
++:103520000100482103E000080000000003E000085B
++:103530002402000227BDFFE0248501002407FF804C
++:10354000AFB00010AFBF0018AFB1001400A718242F
++:103550003C10800030A4007F3C06800A00862821B1
++:103560008E110024AE03002490A200FF1440000836
++:10357000AF850038A0A000098FBF0018AE1100244D
++:103580008FB100148FB0001003E0000827BD0020A9
++:1035900090A900FD90A800FF312400FF0E0020F448
++:1035A000310500FF8F8500388FBF0018A0A00009EB
++:1035B000AE1100248FB100148FB0001003E000089A
++:1035C00027BD002027BDFFD0AFB20020AFB1001C47
++:1035D000AFB00018AFBF002CAFB40028AFB30024C9
++:1035E0003C0980009533011635320C00952F011AE5
++:1035F0003271FFFF023280218E08000431EEFFFF9E
++:10360000248B0100010E6821240CFF8025A5FFFFFB
++:10361000016C50243166007F3C07800AAD2A0024EB
++:1036200000C73021AF850074AF8800703C010801ED
++:10363000A02096F190C300090200D02100809821BB
++:10364000306300FF2862000510400048AF86003854
++:10365000286400021480008E24140001240D00054B
++:103660003C010801A02D96D590CC00FD3C0108013D
++:10367000A02096D63C010801A02096D790CB000A46
++:10368000240AFF80318500FF014B4824312700FFC9
++:1036900010E0000C000058213C12800836510080D8
++:1036A0008E2F00308CD0005C01F0702305C0018E9D
++:1036B0008F87007090D4000A3284007FA0C4000A73
++:1036C0008F8600383C118008363000808E0F003025
++:1036D0008F87007000EF702319C000EE000000001B
++:1036E00090D4000924120002328400FF1092024795
++:1036F000000000008CC2005800E2F82327F9FFFF09
++:103700001B2001300000000090C5000924080004BF
++:1037100030A300FF10680057240A00013C01080193
++:10372000A02A96D590C900FF252700013C01080179
++:10373000A02796D43C030801906396D52406000583
++:103740001066006A2C780005130000C40000902168
++:103750000003F8803C0408012484958003E4C82118
++:103760008F25000000A0000800000000241800FFC2
++:103770001078005C0000000090CC000A90CA00099C
++:103780003C080801910896F13187008000EA48253D
++:103790003C010801A02996DC90C500FD3C140801FD
++:1037A000929496F2311100013C010801A02596DDAA
++:1037B00090DF00FE3C010801A03F96DE90D200FFA2
++:1037C0003C010801A03296DF8CD900543C0108016D
++:1037D000AC3996E08CD000583C010801AC3096E43E
++:1037E0008CC3005C3C010801AC3496EC3C01080140
++:1037F000AC2396E8162000088FBF002C8FB4002859
++:103800008FB300248FB200208FB1001C8FB000183E
++:1038100003E0000827BD00303C1180009624010E13
++:103820000E000FD43094FFFF3C0B08018D6B96F413
++:103830000260382102802821AE2B01803C13080150
++:103840008E7396D401602021240600830E00102F71
++:10385000AFB300108FBF002C8FB400288FB30024AB
++:103860008FB200208FB1001C8FB0001803E0000859
++:1038700027BD00303C1808008F1831FC270F0001CD
++:103880003C010800AC2F31FC0A0021D700000000E9
++:103890001474FFB900000000A0C000FF3C05080040
++:1038A0008CA531E43C0308008C6331E03C02080045
++:1038B0008C4232048F99003834A80001241F000282
++:1038C0003C010801AC2396F43C010801A02896F0C5
++:1038D0003C010801A02296F3A33F00090A002190B1
++:1038E0008F8600380E002146000000000A0021D714
++:1038F0008F8600383C1F080193FF96D424190001DD
++:1039000013F902298F8700703C100801921096D895
++:103910003C06080190C696D610C000050200A02102
++:103920003C040801908496D9109001E48F870078B8
++:10393000001088408F9F0078023048210009C8801D
++:10394000033F702195D80008270F0001A5CF00087C
++:103950003C040801908496D93C05080190A596D6B0
++:103960000E0020F4000000008F8700780230202134
++:103970000004308000C720218C8500048F820074F1
++:1039800000A2402305020006AC8200048C8A0000DD
++:103990008F830070014310235C400001AC83000062
++:1039A0008F86003890CB00FF2D6C00025580002DD3
++:1039B000241400010230F821001F40800107282153
++:1039C00090B9000B8CAE00040019C0400319782197
++:1039D000000F1880006710218C4D000001AE882375
++:1039E0002630FFFF5E00001F241400018C440004F9
++:1039F0008CAA0000008A482319200019240E000414
++:103A00003C010801A02E96D590AD000B8CAB0004B4
++:103A1000000D8840022D80210010108000471021E9
++:103A20008C44000401646023058202009443000872
++:103A300090DF00FE90B9000B33E500FF54B900049D
++:103A40000107A021A0D400FE8F8700780107A021E4
++:103A50009284000B0E0020F4240500018F860038AC
++:103A600024140001125400962E500001160000424A
++:103A70003C08FFFF241900021659FF3F0000000018
++:103A8000A0C000FF8F860038A0D200090A0021D70D
++:103A90008F86003890C700092404000230E300FF3D
++:103AA0001064016F24090004106901528F880074AA
++:103AB0008CCE0054010E682325B10001062001754B
++:103AC000241800043C010801A03896D53C010801E7
++:103AD000A02096D490D400FD90D200FF2E4F00027B
++:103AE00015E0FF14328400FF000438408F8900780D
++:103AF00090DF00FF00E41021000220800089C8212F
++:103B00002FE500029324000B14A0FF0A24070002F3
++:103B100000041840006480210010588001692821A9
++:103B20008CAC0004010C50230540FF020000000093
++:103B30003C030801906396D614600005246F0001D1
++:103B40003C010801A02496D93C010801A02796D782
++:103B50003C010801A02F96D690CE00FF24E700017B
++:103B600031CD00FF01A7882B1220FFE990A4000BA4
++:103B70000A0021C6000000003C0508018CA596D46F
++:103B80003C12000400A8F82413F2000624020005E9
++:103B90003C090801912996D5152000022402000352
++:103BA000240200053C010801A02296F190C700FF05
++:103BB00014E0012024020002A0C200090A0021D75B
++:103BC0008F86003890CC00FF1180FEDA240A0001B5
++:103BD0008F8C00748F890078240F00030180682186
++:103BE0001160001E240E0002000540400105A021C6
++:103BF00000142080008990218E51000401918023BF
++:103C00000600FECC000000003C020801904296D65F
++:103C100014400005245800013C010801A02A96D751
++:103C20003C010801A02596D93C010801A03896D690
++:103C300090DF00FF010510210002C88033E500FF7E
++:103C4000254A00010329202100AA402B1500FEB9B6
++:103C50009085000B1560FFE50005404000054040E1
++:103C600001051821000310803C010801A02A96D408
++:103C70003C010801A02596D8004918218C64000455
++:103C800000E4F82327F9FFFF1F20FFE900000000F0
++:103C90008C63000000E358230560013A01A38823E8
++:103CA00010E301170184C0231B00FEA200000000E6
++:103CB0003C010801A02E96D50A002305240B000123
++:103CC000240E0004A0CE00093C0D08008DAD31F893
++:103CD0008F86003825A200013C010800AC2231F893
++:103CE0000A0021D7000000008CD9005C00F9C02335
++:103CF0001F00FE7B000000008CDF005C10FFFF65F2
++:103D00008F8400748CC3005C008340232502000173
++:103D10001C40FF60000000008CC9005C248700018B
++:103D200000E9282B10A0FE943C0D80008DAB01040F
++:103D30003C0C0001016C50241140FE8F2402001045
++:103D40003C010801A02296F10A0021D700000000E2
++:103D50008F9100748F86003826220001ACC2005C6F
++:103D60000A002292241400018F8700382404FF8067
++:103D70000000882190E9000A241400010124302564
++:103D8000A0E6000A3C05080190A596D63C0408016F
++:103D9000908496D90E0020F4000000008F86003831
++:103DA0008F85007890C800FD310700FF0007404074
++:103DB0000107F821001FC0800305C8219323000BD1
++:103DC000A0C300FD8F8500788F8600380305602131
++:103DD000918F000B000F704001CF6821000D808093
++:103DE000020510218C4B0000ACCB00548D840004E4
++:103DF0008F83007400645023194000022482000164
++:103E00002462000101074821ACC2005C0009308037
++:103E100000C5402100E02021240500010E0020F40F
++:103E20009110000B8F86003890C500FF10A0FF0C8A
++:103E3000001070408F85007801D06821000D10803F
++:103E4000004558218D6400008F8C0074018450233C
++:103E50002547000104E0FF02263100013C03080170
++:103E6000906396D62E2F0002247800013C010801B1
++:103E7000A03896D63C010801A03496D711E0FEF890
++:103E8000020038210A002365000740408F84003873
++:103E90008F8300748C85005800A340230502FE9A8E
++:103EA000AC8300580A00223B000000003C070801D8
++:103EB00090E796F2240200FF10E200BE8F860038E1
++:103EC0003C110801963196FA3C030801246396F8E8
++:103ED000262500013230FFFF30ABFFFF02036021D7
++:103EE0002D6A00FF1540008D918700043C010801F8
++:103EF000A42096FA8F88003800074840012728211F
++:103F0000911800FF000530802405000127140001EE
++:103F1000A11400FF3C120801925296F28F8800789B
++:103F20008F8E0070264F000100C820213C0108013F
++:103F3000A02F96F2AC8E00008F8D0074A48500082F
++:103F4000AC8D00043C030801906396D414600077A4
++:103F5000000090213C010801A02596D4A087000B09
++:103F60008F8C007800CC5021A147000A8F82003846
++:103F7000A04700FD8F840038A08700FE8F860038A0
++:103F80008F9F0070ACDF00548F990074ACD900583B
++:103F90008F8D00780127C02100185880016DA02165
++:103FA000928F000A000F704001CF18210003888013
++:103FB000022D8021A207000B8F8600780166602108
++:103FC000918A000B000A1040004A2021000428803A
++:103FD00000A64021A107000A3C07800834E90080C0
++:103FE0008D2200308F860038ACC2005C0A0022921D
++:103FF0002414000190CA00FF1540FEAD8F880074A4
++:10400000A0C400090A0021D78F860038A0C000FD97
++:104010008F98003824060001A30000FE3C0108012F
++:10402000A02696D53C010801A02096D40A0021C6FE
++:104030000000000090CB00FF3C040801908496F340
++:10404000316C00FF0184502B1540000F2402000347
++:1040500024020004A0C200090A0021D78F8600387C
++:1040600090C3000A2410FF8002035824316C00FF23
++:104070001180FDC1000000003C010801A02096D580
++:104080000A0021C600000000A0C200090A0021D7D2
++:104090008F86003890D4000A2412FF8002544824EE
++:1040A000312800FF1500FFF4240200083C0108013C
++:1040B000A02296F10A0021D70000000000108840DD
++:1040C0008F8B0070023018210003688001A7202127
++:1040D000AC8B00008F8A0074240C0001A48C0008B3
++:1040E000AC8A00043C05080190A596D62402000184
++:1040F00010A2FE1E24A5FFFF0A0022519084000B8F
++:104100000184A0231A80FD8B000000003C010801FF
++:10411000A02E96D50A002305240B00013C010801BE
++:10412000A42596FA0A0023B78F880038240B0001D3
++:10413000106B00228F9800388F85003890BF00FFE9
++:1041400033F900FF1079002B000000003C1F08012C
++:1041500093FF96D8001FC840033FC0210018A080DD
++:104160000288782191EE000AA08E000A8F8D0078D7
++:104170003C030801906396D800CD88210A0023DD16
++:10418000A223000B263000010600003101A4902379
++:104190000640002B240200033C010801A02F96D505
++:1041A0000A002305240B00018F8900380A00223BF6
++:1041B000AD2700540A00229124120001931400FD3F
++:1041C000A094000B8F8800388F8F0078910E00FE2E
++:1041D00000CF6821A1AE000A8F910038A22700FD10
++:1041E0008F8300708F900038AE0300540A0023DEE6
++:1041F0008F8D007890B000FEA090000A8F8B003861
++:104200008F8C0078916A00FD00CC1021A04A000B31
++:104210008F840038A08700FE8F8600748F85003859
++:10422000ACA600580A0023DE8F8D007894B80008F1
++:10423000ACA40004030378210A002285A4AF00087F
++:104240003C010801A02296D50A0021C6000000000A
++:1042500090CF0009240D000431EE00FF11CDFD8543
++:10426000240200013C010801A02296D50A0021C6C3
++:1042700000000000080033440800334408003420E4
++:10428000080033F4080033D8080033280800332826
++:10429000080033280800334C8008010080080080A3
++:1042A000800800005F865437E4AC62CC50103A4579
++:1042B00036621985BF14C0E81BC27A1E84F4B55655
++:1042C000094EA6FE7DDA01E7C04D748108005A74DC
++:1042D00008005AB808005A5C08005A5C08005A5C8A
++:1042E00008005A5C08005A7408005A5C08005A5CBE
++:1042F00008005AC008005A5C080059D408005A5CEB
++:1043000008005A5C08005AC008005A5C08005A5C51
++:1043100008005A5C08005A5C08005A5C08005A5CA5
++:1043200008005A5C08005A5C08005A5C08005A5C95
++:1043300008005A9408005A5C08005A9408005A5C15
++:1043400008005A5C08005A5C08005A9808005A9401
++:1043500008005A5C08005A5C08005A5C08005A5C65
++:1043600008005A5C08005A5C08005A5C08005A5C55
++:1043700008005A5C08005A5C08005A5C08005A5C45
++:1043800008005A5C08005A5C08005A5C08005A5C35
++:1043900008005A5C08005A5C08005A5C08005A5C25
++:1043A00008005A9808005A9808005A5C08005A9861
++:1043B00008005A5C08005A5C08005A5C08005A5C05
++:1043C00008005A5C08005A5C08005A5C08005A5CF5
++:1043D00008005A5C08005A5C08005A5C08005A5CE5
++:1043E00008005A5C08005A5C08005A5C08005A5CD5
++:1043F00008005A5C08005A5C08005A5C08005A5CC5
++:1044000008005A5C08005A5C08005A5C08005A5CB4
++:1044100008005A5C08005A5C08005A5C08005A5CA4
++:1044200008005A5C08005A5C08005A5C08005A5C94
++:1044300008005A5C08005A5C08005A5C08005A5C84
++:1044400008005A5C08005A5C08005A5C08005A5C74
++:1044500008005A5C08005A5C08005A5C08005A5C64
++:1044600008005A5C08005A5C08005A5C08005A5C54
++:1044700008005A5C08005A5C08005A5C08005A5C44
++:1044800008005A5C08005A5C08005A5C08005A5C34
++:1044900008005A5C08005A5C08005A5C08005A5C24
++:1044A00008005A5C08005A5C08005A5C08005A5C14
++:1044B00008005A5C08005A5C08005A5C08005A5C04
++:1044C00008005A5C08005A5C08005A5C08005ADC74
++:1044D0000800782C08007A900800783808007628C0
++:1044E00008007838080078C4080078380800762872
++:1044F0000800762808007628080076280800762824
++:104500000800762808007628080076280800762813
++:1045100008007628080078580800784808007628AF
++:1045200008007628080076280800762808007628F3
++:1045300008007628080076280800762808007628E3
++:1045400008007628080076280800762808007848B1
++:10455000080082FC08008188080082C40800818865
++:104560000800829408008070080081880800818813
++:1045700008008188080081880800818808008188F7
++:1045800008008188080081880800818808008188E7
++:104590000800818808008188080081B008008D34F7
++:1045A00008008E9008008E70080088D808008D4C96
++:1045B0000A00012400000000000000000000000DBF
++:1045C000747061362E322E31620000000602010145
++:1045D00000000000000000000000000000000000DB
++:1045E00000000000000000000000000000000000CB
++:1045F00000000000000000000000000000000000BB
++:1046000000000000000000000000000000000000AA
++:10461000000000000000000000000000000000009A
++:10462000000000000000000000000000000000008A
++:10463000000000000000000000000000000000007A
++:104640000000000010000003000000000000000D4A
++:104650000000000D3C020800244217203C03080023
++:1046600024632A10AC4000000043202B1480FFFD7F
++:10467000244200043C1D080037BD2FFC03A0F0219C
++:104680003C100800261004903C1C0800279C1720B2
++:104690000E000262000000000000000D2402FF80F6
++:1046A00027BDFFE000821024AFB00010AF42002011
++:1046B000AFBF0018AFB10014936500043084007FD1
++:1046C000034418213C0200080062182130A5002094
++:1046D000036080213C080111277B000814A0000220
++:1046E0002466005C2466005892020004974301048B
++:1046F000920400043047000F3063FFFF3084004015
++:10470000006728231080000900004821920200055C
++:1047100030420004104000050000000010A000031B
++:104720000000000024A5FFFC2409000492020005FB
++:1047300030420004104000120000000010A00010E1
++:10474000000000009602000200A72021010440257D
++:104750002442FFFEA7421016920300042402FF80A9
++:1047600000431024304200FF104000033C020400CC
++:104770000A000174010240258CC20000AF421018EB
++:104780008F4201780440FFFE2402000AA742014044
++:1047900096020002240400093042000700021023A0
++:1047A00030420007A7420142960200022442FFFE67
++:1047B000A7420144A740014697420104A74201488D
++:1047C0008F420108304200205040000124040001C3
++:1047D00092020004304200101440000234830010A2
++:1047E00000801821A743014A0000000000000000DB
++:1047F0000000000000000000AF48100000000000B2
++:104800000000000000000000000000008F421000C7
++:104810000441FFFE3102FFFF1040000700000000CE
++:1048200092020004304200401440000300000000E7
++:104830008F421018ACC20000960200063042FFFF03
++:10484000244200020002104300021040036288214B
++:10485000962200001120000D3044FFFF00A7102118
++:104860008F8300388F45101C0002108200021080D8
++:1048700000431021AC45000030A6FFFF0E00058D5F
++:1048800000052C0200402021A62200009203000413
++:104890002402FF8000431024304200FF1040001F1C
++:1048A0000000000092020005304200021040001B90
++:1048B000000000009742100C2442FFFEA742101691
++:1048C000000000003C02040034420030AF421000FF
++:1048D00000000000000000000000000000000000D8
++:1048E0008F4210000441FFFE000000009742100CB0
++:1048F0008F45101C3042FFFF24420030000210821E
++:1049000000021080005B1021AC45000030A6FFFFC4
++:104910000E00058D00052C02A62200009604000260
++:10492000248400080E0001E93084FFFF974401044D
++:104930000E0001F73084FFFF8FBF00188FB1001405
++:104940008FB000103C02100027BD002003E00008DB
++:10495000AF4201783084FFFF308200078F8500244A
++:1049600010400002248300073064FFF800A41021E7
++:1049700030421FFF03421821247B4000AF850028EE
++:10498000AF82002403E00008AF4200843084FFFFC0
++:104990003082000F8F85002C8F860034104000027B
++:1049A0002483000F3064FFF000A410210046182B70
++:1049B000AF8500300046202314600002AF82002C37
++:1049C000AF84002C8F82002C340480000342182115
++:1049D00000641821AF83003803E00008AF42008074
++:1049E0008F820014104000088F8200048F82FFDC49
++:1049F000144000058F8200043C02FFBF3442FFFFD9
++:104A0000008220248F82000430430006240200022A
++:104A10001062000F3C0201012C62000350400005AF
++:104A2000240200041060000F3C0200010A00023062
++:104A30000000000010620005240200061462000C51
++:104A40003C0201110A000229008210253C020011DB
++:104A500000821025AF421000240200010A0002303B
++:104A6000AF82000C00821025AF421000AF80000C16
++:104A700000000000000000000000000003E000084B
++:104A8000000000008F82000C1040000400000000B5
++:104A90008F4210000441FFFE0000000003E0000808
++:104AA000000000008F8200102443F800000231C291
++:104AB00024C2FFF02C6303011060000300021042C7
++:104AC0000A000257AC8200008F85001800C5102B29
++:104AD0001440000B0000182100C5102324470001DA
++:104AE0008F82001C00A210212442FFFF0046102BE1
++:104AF000544000042402FFFF0A000257AC87000064
++:104B00002402FFFF0A000260AC8200008C820000D9
++:104B10000002194000621821000318800062182169
++:104B2000000318803C0208002442175C0062182130
++:104B300003E000080060102127BDFFD8AFBF0020B0
++:104B4000AFB1001CAFB000183C0460088C8250006C
++:104B50002403FF7F3C066000004310243442380CDD
++:104B6000AC8250008CC24C1C3C1A80000002160221
++:104B70003042000F10400007AF82001C8CC34C1C59
++:104B80003C02001F3442FC0000621824000319C2DA
++:104B9000AF8300188F420008275B400034420001B9
++:104BA000AF420008AF8000243C02601CAF40008090
++:104BB000AF4000848C4500088CC308083402800094
++:104BC000034220212402FFF0006218243C020080EE
++:104BD0003C010800AC2204203C025709AF84003895
++:104BE00014620004AF850034240200010A0002921E
++:104BF000AF820014AF8000148F42000038420001E1
++:104C0000304200011440FFFC8F8200141040001657
++:104C10000000000097420104104000058F8300004F
++:104C2000146000072462FFFF0A0002A72C62000A3A
++:104C30002C620010504000048F83000024620001A9
++:104C4000AF8200008F8300002C62000A1440000332
++:104C50002C6200070A0002AEAF80FFDC10400002A9
++:104C600024020001AF82FFDC8F4301088F44010062
++:104C700030622000AF83000410400008AF840010B1
++:104C80003C0208008C42042C244200013C01080034
++:104C9000AC22042C0A00058A3C0240003065020068
++:104CA00014A0000324020F001482026024020D00ED
++:104CB00097420104104002C83C02400030624000AC
++:104CC000144000AD8F8200388C4400088F42017878
++:104CD0000440FFFE24020800AF42017824020008CD
++:104CE000A7420140A7400142974201048F8400047B
++:104CF0003051FFFF30820001104000070220802168
++:104D00002623FFFE240200023070FFFFA742014667
++:104D10000A0002DBA7430148A74001463C02080005
++:104D20008C42043C1440000D8F8300103082002020
++:104D30001440000224030009240300010060202124
++:104D40008F830010240209005062000134840004A3
++:104D5000A744014A0A0002F60000000024020F00E6
++:104D60001462000530820020144000062403000D68
++:104D70000A0002F524030005144000022403000980
++:104D800024030001A743014A3C0208008C4204208E
++:104D90003C0400480E00020C004420250E000235A1
++:104DA000000000008F82000C1040003E0000000058
++:104DB0008F4210003C0300200043102410400039B3
++:104DC0008F820004304200021040003600000000D4
++:104DD000974210141440003300000000974210085E
++:104DE0008F8800383042FFFF2442000600021882FC
++:104DF0000003388000E83021304300018CC40000FB
++:104E000010600004304200030000000D0A00033768
++:104E100000E81021544000103084FFFF3C05FFFFE4
++:104E200000852024008518260003182B0004102B71
++:104E300000431024104000050000000000000000A6
++:104E40000000000D00000000240002228CC20000BF
++:104E50000A000336004520253883FFFF0003182B86
++:104E60000004102B00431024104000050000000037
++:104E7000000000000000000D000000002400022BD4
++:104E80008CC200003444FFFF00E81021AC44000055
++:104E90003C0208008C420430244200013C0108001E
++:104EA000AC2204308F6200008F840038AF8200088B
++:104EB0008C8300003402FFFF1462000F00001021F9
++:104EC0003C0508008CA504543C0408008C84045064
++:104ED00000B0282100B0302B008220210086202144
++:104EE0003C010800AC2504543C010800AC240450EB
++:104EF0000A000580240400088C8200003042010072
++:104F00001040000F000010213C0508008CA5044C47
++:104F10003C0408008C84044800B0282100B0302BE9
++:104F200000822021008620213C010800AC25044C91
++:104F30003C010800AC2404480A0005802404000851
++:104F40003C0508008CA504443C0408008C84044003
++:104F500000B0282100B0302B0082202100862021C3
++:104F60003C010800AC2504443C010800AC2404408A
++:104F70000A000580240400088F6200088F62000088
++:104F800000021602304300F02402003010620005D7
++:104F900024020040106200E08F8200200A00058891
++:104FA0002442000114A000050000000000000000E1
++:104FB0000000000D00000000240002568F4201781E
++:104FC0000440FFFE000000000E00023D27A4001078
++:104FD0001440000500408021000000000000000D8A
++:104FE000000000002400025D8E0200001040000559
++:104FF00000000000000000000000000D00000000A4
++:10500000240002608F62000C0443000324020001AC
++:105010000A00042EAE000000AE0200008F820038AD
++:105020008C480008A20000078F65000C8F64000404
++:1050300030A3FFFF0004240200852023308200FFFC
++:105040000043102124420005000230832CC200815D
++:10505000A605000A14400005A20400040000000098
++:105060000000000D00000000240002788F85003849
++:105070000E0005AB260400148F6200048F43010864
++:10508000A60200083C02100000621824106000080C
++:105090000000000097420104920300072442FFEC45
++:1050A000346300023045FFFF0A0003C3A203000778
++:1050B000974201042442FFF03045FFFF96060008A6
++:1050C0002CC200135440000592030007920200070F
++:1050D00034420001A20200079203000724020001EB
++:1050E00010620005240200031062000B8F8200385A
++:1050F0000A0003E030C6FFFF8F8200383C04FFFF48
++:105100008C43000C0064182400651825AC43000C87
++:105110000A0003E030C6FFFF3C04FFFF8C43001091
++:105120000064182400651825AC43001030C6FFFF4A
++:1051300024C2000200021083A20200058F830038FF
++:10514000304200FF00021080004328218CA800009C
++:105150008CA2000024030004000217021443001272
++:1051600000000000974201043C03FFFF01031824E4
++:105170003042FFFF004610232442FFFE006240251C
++:10518000ACA8000092030005306200FF000210800E
++:1051900000501021904200143042000F00431021B3
++:1051A0000A000415A20200068CA400049742010420
++:1051B0009603000A3088FFFF3042FFFF00461023AD
++:1051C0002442FFD60002140001024025ACA80004CE
++:1051D000920200079204000524630028000318834C
++:1051E0000064182134420004A2030006A202000752
++:1051F0008F8200042403FFFB34420002004310248A
++:10520000AF820004920300068F87003800031880E5
++:10521000007010218C4400203C02FFF63442FFFF56
++:105220000082402400671821AE04000CAC68000C1A
++:10523000920500063C03FF7F8E02000C00052880CB
++:1052400000B020213463FFFF01033024948800263E
++:1052500000A7282100431024AE02000CAC860020D9
++:10526000AC880024ACA8001024020010A742014022
++:1052700024020002A7400142A7400144A742014680
++:10528000974201043C0400082442FFFEA742014863
++:10529000240200010E00020CA742014A9603000AF4
++:1052A0009202000400431021244200023042000711
++:1052B00000021023304200070E000235AE0200103B
++:1052C0008F6200003C0308008C6304442404001037
++:1052D000AF820008974201043042FFFF2442FFFEE4
++:1052E00000403821000237C33C0208008C420440D1
++:1052F000006718210067282B004610210045102167
++:105300003C010800AC2304443C010800AC220440EA
++:105310000A0005150000000014A0000500000000B0
++:10532000000000000000000D000000002400030A3F
++:105330008F4201780440FFFE000000000E00023D95
++:1053400027A4001414400005004080210000000044
++:105350000000000D00000000240003118E02000078
++:105360005440000692020007000000000000000DFB
++:10537000000000002400031C9202000730420004D9
++:10538000104000058F8200042403FFFB344200021A
++:1053900000431024AF8200048F620004044300081D
++:1053A00092020007920200068E03000CAE0000007D
++:1053B0000002108000501021AC4300209202000730
++:1053C00030420004544000099602000A920200058F
++:1053D0003C03000100021080005010218C46001890
++:1053E00000C33021AC4600189602000A9206000461
++:1053F000277100080220202100C2302124C60005A8
++:10540000260500140E0005AB00063082920400064B
++:105410008F6500043C027FFF000420800091202162
++:105420008C8300043442FFFF00A228240065182169
++:10543000AC8300049202000792040005920300046A
++:10544000304200041040001496070008308400FF2A
++:1054500000042080009120218C86000497420104E2
++:105460009605000A306300FF3042FFFF0043102121
++:105470000045102130E3FFFF004310232442FFD8F2
++:1054800030C6FFFF0002140000C23025AC860004C5
++:105490000A0004C992030007308500FF0005288038
++:1054A00000B128218CA4000097420104306300FF62
++:1054B0003042FFFF00431021004710233C03FFFF51
++:1054C000008320243042FFFF00822025ACA400008E
++:1054D0009203000724020001106200060000000091
++:1054E0002402000310620011000000000A0004EC16
++:1054F0008E03001097420104920300049605000AEF
++:105500008E24000C00431021004510212442FFF29C
++:105510003C03FFFF008320243042FFFF0082202550
++:10552000AE24000C0A0004EC8E0300109742010424
++:10553000920300049605000A8E24001000431021F7
++:10554000004510212442FFEE3C03FFFF008320248E
++:105550003042FFFF00822025AE2400108E03001091
++:105560002402000AA7420140A74301429603000A11
++:10557000920200043C04004000431021A742014471
++:10558000A740014697420104A742014824020001B6
++:105590000E00020CA742014A0E0002350000000076
++:1055A0008F6200009203000400002021AF820008F7
++:1055B000974201049606000A3042FFFF006218215C
++:1055C000006028213C0308008C6304443C0208006E
++:1055D0008C42044000651821004410210065382BDE
++:1055E000004710213C010800AC2304443C010800A2
++:1055F000AC22044092040004008620212484000A86
++:105600003084FFFF0E0001E9000000009744010410
++:105610003084FFFF0E0001F7000000003C02100084
++:10562000AF4201780A0005878F820020148200278C
++:105630003062000697420104104000673C024000BF
++:105640003062400010400005000000000000000033
++:105650000000000D00000000240004208F420178AB
++:105660000440FFFE24020800AF4201782402000833
++:10567000A7420140A74001428F82000497430104E2
++:1056800030420001104000073070FFFF2603FFFE8C
++:1056900024020002A7420146A74301480A00053F31
++:1056A0002402000DA74001462402000DA742014A32
++:1056B0008F62000024040008AF8200080E0001E998
++:1056C000000000000A0005190200202110400042DD
++:1056D0003C02400093620000304300F024020010BE
++:1056E0001062000524020070106200358F820020D5
++:1056F0000A000588244200018F62000097430104DC
++:105700003050FFFF3071FFFF8F4201780440FFFEF1
++:105710003202000700021023304200072403000A6F
++:105720002604FFFEA7430140A7420142A7440144CB
++:10573000A7400146A75101488F420108304200208E
++:10574000144000022403000924030001A743014A76
++:105750000E00020C3C0400400E0002350000000068
++:105760003C0708008CE70444021110212442FFFE8C
++:105770003C0608008CC604400040182100E3382194
++:10578000000010218F65000000E3402B00C2302193
++:105790002604000800C830213084FFFFAF850008D0
++:1057A0003C010800AC2704443C010800AC2604403E
++:1057B0000E0001E9000000000A0005190220202166
++:1057C0000E00013B000000008F82002024420001F7
++:1057D000AF8200203C024000AF4201380A00029232
++:1057E000000000003084FFFF30C6FFFF00052C00E2
++:1057F00000A628253882FFFF004510210045282BF0
++:105800000045102100021C023042FFFF004310211E
++:1058100000021C023042FFFF004310213842FFFF0C
++:1058200003E000083042FFFF3084FFFF30A5FFFF98
++:1058300000001821108000070000000030820001E5
++:105840001040000200042042006518210A0005A152
++:105850000005284003E000080060102110C0000689
++:1058600024C6FFFF8CA2000024A50004AC82000027
++:105870000A0005AB2484000403E0000800000000D7
++:1058800010A0000824A3FFFFAC8600000000000069
++:10589000000000002402FFFF2463FFFF1462FFFAF0
++:1058A0002484000403E00008000000000000000160
++:1058B0000A00002A00000000000000000000000DA7
++:1058C000747870362E322E3162000000060201001C
++:1058D00000000000000001360000EA600000000047
++:1058E00000000000000000000000000000000000B8
++:1058F00000000000000000000000000000000000A8
++:105900000000000000000000000000000000000097
++:105910000000001600000000000000000000000071
++:105920000000000000000000000000000000000077
++:105930000000000000000000000000000000000067
++:1059400000000000000000000000138800000000BC
++:10595000000005DC00000000000000001000000353
++:10596000000000000000000D0000000D3C020800D7
++:1059700024423D683C0308002463401CAC40000006
++:105980000043202B1480FFFD244200043C1D08002E
++:1059900037BD7FFC03A0F0213C100800261000A8B2
++:1059A0003C1C0800279C3D680E00044E00000000CF
++:1059B0000000000D27BDFFB4AFA10000AFA200049E
++:1059C000AFA30008AFA4000CAFA50010AFA6001451
++:1059D000AFA70018AFA8001CAFA90020AFAA0024F1
++:1059E000AFAB0028AFAC002CAFAD0030AFAE003491
++:1059F000AFAF0038AFB8003CAFB90040AFBC004417
++:105A0000AFBF00480E000591000000008FBF0048A6
++:105A10008FBC00448FB900408FB8003C8FAF003876
++:105A20008FAE00348FAD00308FAC002C8FAB0028D0
++:105A30008FAA00248FA900208FA8001C8FA7001810
++:105A40008FA600148FA500108FA4000C8FA3000850
++:105A50008FA200048FA1000027BD004C3C1B6004F6
++:105A60008F7A5030377B502803400008AF7A00000F
++:105A70008F86003C3C0390003C0280000086282575
++:105A800000A32025AC4400203C0380008C6700204C
++:105A900004E0FFFE0000000003E00008000000003A
++:105AA0000A000070240400018F85003C3C04800043
++:105AB0003483000100A3102503E00008AC8200201D
++:105AC00003E00008000010213084FFFF30A5FFFF35
++:105AD00010800007000018213082000110400002F1
++:105AE00000042042006518211480FFFB00052840B7
++:105AF00003E000080060102110C000070000000053
++:105B00008CA2000024C6FFFF24A50004AC82000084
++:105B100014C0FFFB2484000403E000080000000020
++:105B200010A0000824A3FFFFAC86000000000000C6
++:105B3000000000002402FFFF2463FFFF1462FFFA4D
++:105B40002484000403E000080000000090AA003153
++:105B50008FAB00108CAC00403C0300FF8D6800044C
++:105B6000AD6C00208CAD004400E060213462FFFF8A
++:105B7000AD6D00248CA700483C09FF000109C0243A
++:105B8000AD6700288CAE004C0182C824031978252B
++:105B9000AD6F0004AD6E002C8CAD0038314A00FFB3
++:105BA000AD6D001C94A900323128FFFFAD680010D4
++:105BB00090A70030A5600002A1600004A16700006A
++:105BC00090A30032306200FF0002198210600005CD
++:105BD000240500011065000E0000000003E000082D
++:105BE000A16A00018CD80028354A0080AD780018E1
++:105BF0008CCF0014AD6F00148CCE0030AD6E000859
++:105C00008CC4002CA16A000103E00008AD64000C04
++:105C10008CCD001CAD6D00188CC90014AD6900144A
++:105C20008CC80024AD6800088CC70020AD67000C4C
++:105C30008CC200148C8300700043C82B1320000713
++:105C4000000000008CC20014144CFFE400000000AF
++:105C5000354A008003E00008A16A00018C820070D0
++:105C60000A0000E6000000009089003027BDFFF820
++:105C70008FA8001CA3A900008FA300003C0DFF808B
++:105C800035A2FFFF8CAC002C00625824AFAB0000A3
++:105C9000A100000400C05821A7A000028D06000446
++:105CA00000A048210167C8218FA500000080502175
++:105CB0003C18FF7F032C20263C0E00FF2C8C00019B
++:105CC000370FFFFF35CDFFFF3C02FF0000AFC824B8
++:105CD00000EDC02400C27824000C1DC003236825F9
++:105CE00001F87025AD0D0000AD0E00048D240024D8
++:105CF000AFAD0000AD0400088D2C00202404FFFF90
++:105D0000AD0C000C9547003230E6FFFFAD060010E9
++:105D10009145004830A200FF000219C25060000106
++:105D20008D240034AD0400148D4700388FAA00186C
++:105D300027BD0008AD0B0028AD0A0024AD07001CEC
++:105D4000AD00002CAD00001803E00008AD000020FD
++:105D500027BDFFE0AFB20018AFB10014AFB0001024
++:105D6000AFBF001C9098003000C088213C0D00FFA0
++:105D7000330F007FA0CF0000908E003135ACFFFFC5
++:105D80003C0AFF00A0CE000194A6001EA220000441
++:105D90008CAB00148E29000400A08021016C282403
++:105DA000012A40240080902101052025A62600021A
++:105DB000AE24000426050020262400080E000092D0
++:105DC00024060002924700302605002826240014ED
++:105DD00000071E000003160324060004044000030D
++:105DE0002403FFFF965900323323FFFF0E00009279
++:105DF000AE230010262400248FBF001C8FB2001891
++:105E00008FB100148FB00010240500030000302172
++:105E10000A00009C27BD002027BDFFD8AFB1001CA1
++:105E2000AFB00018AFBF002090A9003024020001DD
++:105E300000E050213123003F00A040218FB00040FE
++:105E40000080882100C04821106200148FA700380C
++:105E5000240B000500A0202100C02821106B001396
++:105E6000020030210E000128000000009225007C75
++:105E700030A400021080000326030030AE00003082
++:105E8000260300348FBF00208FB1001C8FB0001894
++:105E90000060102103E0000827BD00280E0000A7C5
++:105EA000AFB000100A00016F000000008FA3003C9B
++:105EB000010020210120282101403021AFA3001042
++:105EC0000E0000EEAFB000140A00016F00000000E9
++:105ED0003C06800034C20E008C4400108F850044C4
++:105EE000ACA400208C43001803E00008ACA30024FD
++:105EF0003C06800034C20E008C4400148F850044A0
++:105F0000ACA400208C43001C03E00008ACA30024D8
++:105F10009382000C1040001B2483000F2404FFF028
++:105F20000064382410E00019978B00109784000E4D
++:105F30009389000D3C0A601C0A0001AC01644023F7
++:105F400001037021006428231126000231C2FFFFE3
++:105F500030A2FFFF0047302B50C0000E00E4482164
++:105F60008D4D000C31A3FFFF00036400000C2C03D7
++:105F700004A1FFF30000302130637FFF0A0001A479
++:105F80002406000103E00008000000009784000ED2
++:105F900000E448213123FFFF3168FFFF0068382B00
++:105FA00054E0FFF8A783000E938A000D114000050E
++:105FB000240F0001006BC023A380000D03E0000844
++:105FC000A798000E006BC023A38F000D03E000080C
++:105FD000A798000E03E000080000000027BDFFE8BE
++:105FE000AFB000103C10800036030140308BFFFF43
++:105FF00093AA002BAFBF0014A46B000436040E005C
++:106000009488001630C600FF8FA90030A4680006EF
++:10601000AC650008A0660012A46A001AAC670020F4
++:106020008FA5002CA4690018012020210E000198E2
++:10603000AC6500143C021000AE0201788FBF001462
++:106040008FB0001003E0000827BD00188F85000006
++:106050002484000727BDFFF83084FFF83C06800049
++:1060600094CB008A316AFFFFAFAA00008FA900001D
++:10607000012540232507FFFF30E31FFF0064102B9D
++:106080001440FFF700056882000D288034CC4000E2
++:1060900000AC102103E0000827BD00088F8200003B
++:1060A0002486000730C5FFF800A2182130641FFFC6
++:1060B00003E00008AF8400008F87003C8F84004419
++:1060C00027BDFFB0AFB70044AFB40038AFB1002C6C
++:1060D000AFBF0048AFB60040AFB5003CAFB300342F
++:1060E000AFB20030AFB000283C0B80008C8600249B
++:1060F000AD6700808C8A002035670E00356901008D
++:10610000ACEA00108C8800248D2500040000B82122
++:10611000ACE800188CE3001000A688230000A02142
++:10612000ACE300148CE20018ACE2001C122000FE6C
++:1061300000E0B021936C0008118000F40000000022
++:10614000976F001031EEFFFF022E682B15A000EFB5
++:1061500000000000977200103250FFFFAED0000028
++:106160003C0380008C740000329300081260FFFD35
++:106170000000000096D800088EC700043305FFFF1A
++:1061800030B5000112A000E4000000000000000D86
++:1061900030BFA0402419004013F9011B30B4A00007
++:1061A000128000DF000000009373000812600008F6
++:1061B00000000000976D001031ACFFFF00EC202BB9
++:1061C0001080000330AE004011C000D50000000078
++:1061D000A7850040AF87003893630008022028217C
++:1061E000AFB10020146000F527B40020AF60000CB0
++:1061F000978F004031F14000162000022403001662
++:106200002403000E24054007A363000AAF650014B1
++:10621000938A00428F70001431550001001512401E
++:1062200002024825AF690014979F00408F78001440
++:1062300033F9001003194025AF680014979200400D
++:106240003247000810E0016E000000008F67001464
++:106250003C1210003C11800000F27825AF6F001452
++:1062600036230E00946E000A3C0D81002406000EB9
++:1062700031CCFFFF018D2025AF640004A36600022E
++:106280009373000A3406FFFC266B0004A36B000A1C
++:1062900097980040330820001100015F00000000C3
++:1062A0003C05800034A90E00979900409538000CF9
++:1062B00097870040001940423312C00031030003A9
++:1062C00000127B0330F11000006F6825001172038B
++:1062D00001AE6025000C20C0A76400129793004017
++:1062E000936A000A001359823175003C02AA1021FA
++:1062F0002450003CA3700009953F000C33F93FFF88
++:10630000A779001097700012936900090130F821F5
++:1063100027E5000230B900070019C0233308000741
++:10632000A368000B9371000997720012976F001019
++:10633000322700FF8F910038978D004000F218211E
++:10634000006F702101C6602131A6004010C0000519
++:106350003185FFFF00B1102B3C1280001040001768
++:10636000000098210225A82B56A0013E8FA50020F1
++:106370003C048000348A0E008D5300143C068000DB
++:10638000AD5300108D4B001CAD4B0018AD45000007
++:106390008CCD000031AC00081180FFFD34CE0E0022
++:1063A00095C3000800A0882100009021A783004029
++:1063B0008DC6000424130001AF860038976F0010CB
++:1063C00031F5FFFF8E9F000003F1282310A0011F6D
++:1063D000AE85000093620008144000DD000000005C
++:1063E0000E0001E7240400108F900048004028218F
++:1063F0003C023200320600FF000654000142F8253C
++:1064000026090001AF890048ACBF0000937900095C
++:1064100097780012936F000A332800FF3303FFFFC1
++:106420000103382100076C0031EE00FF01AE60254A
++:10643000ACAC00048F840048978B0040316A200088
++:106440001140010AACA4000897640012308BFFFFD2
++:1064500006400108ACAB000C978E004031C5000827
++:1064600014A0000226280006262800023C1F8000F7
++:1064700037E70E0094F900148CE5001C8F670004C8
++:10648000937800023324FFFF330300FFAFA3001013
++:106490008F6F0014AFA800180E0001CBAFAF00142F
++:1064A000240400100E0001FB000000008E9200008A
++:1064B00016400005000000008F7800142403FFBF81
++:1064C0000303A024AF7400148F67000C00F5C821EB
++:1064D000AF79000C9375000816A0000800000000BA
++:1064E00012600006000000008F6800143C0AEFFFF5
++:1064F0003549FFFE0109F824AF7F0014A37300089B
++:106500008FA500200A00034F02202021AED10000F9
++:106510000A00022D3C03800014E0FF1E30BFA040A3
++:106520000E0001900000A0212E9100010237B0253D
++:1065300012C000188FBF00488F87003C24170F003F
++:1065400010F700D43C0680008CD901780720FFFEAC
++:10655000241F0F0010FF00F634CA0E008D560014E1
++:1065600034C7014024080240ACF600048D49001CE9
++:106570003C141000ACE90008A0E00012A4E0001AEE
++:10658000ACE00020A4E00018ACE80014ACD4017822
++:106590008FBF00488FB700448FB600408FB5003CD6
++:1065A0008FB400388FB300348FB200308FB1002C1D
++:1065B0008FB0002803E0000827BD00508F910038FD
++:1065C000978800403C1280000220A821310700403B
++:1065D00014E0FF7C00009821977900108F9200381A
++:1065E0003338FFFF131200A8000020210080A021F3
++:1065F000108000F300A088211620FECE00000000CD
++:106600000A00031F2E9100013C0380008C62017878
++:106610000440FFFE240808008F860000AC68017863
++:106620003C038000946D008A31ACFFFF0186582343
++:10663000256AFFFF31441FFF2C8900081520FFF950
++:10664000000000008F8F0048347040008F83003CB2
++:1066500000E0A021240E0F0025E70001AF870048CD
++:1066600000D03021023488233C08800031F500FF3F
++:10667000106E0005240700019398004233130001B7
++:106680000013924036470001001524003C0A010027
++:10669000008A4825ACC900008F82004830BF003610
++:1066A00030B90008ACC200041320009900FF9825FF
++:1066B00035120E009650000A8F8700003C0F8100B3
++:1066C0003203FFFF24ED000835060140006F60250E
++:1066D0003C0E100031AB1FFF269200062405000E71
++:1066E000ACCC0020026E9825A4C5001AAF8B000028
++:1066F000A4D20018162000083C1080008F89003CAE
++:1067000024020F00512200022417000136730040BA
++:106710000E0001883C10800036060E008CCB001461
++:10672000360A014002402021AD4B00048CC5001CFC
++:10673000AD450008A1550012AD5300140E0001989C
++:106740003C151000AE1501780A000352000000004D
++:10675000936F0009976E0012936D000B31E500FFF7
++:1067600000AE202131AC00FF008C80212602000AFF
++:106770003050FFFF0E0001E7020020218F86004805
++:106780003C0341003C05800024CB0001AF8B004856
++:10679000936A00099769001230C600FF315F00FF5D
++:1067A0003128FFFF03E8382124F900020006C40065
++:1067B0000319782501E37025AC4E00008F6D000CA5
++:1067C00034A40E00948B001401B26025AC4C00047C
++:1067D0008C85001C8F670004936A00023164FFFF00
++:1067E000314900FFAFA900108F680014AFB1001845
++:1067F0000E0001CBAFA800140A0002FD0200202108
++:10680000AF600004A36000029798004033082000A6
++:106810001500FEA300003021A760001297840040FD
++:10682000936B000A3C10800030931F0000135183CB
++:10683000014BA82126A20028A362000936090E00F8
++:10684000953F000C0A000295A77F00108F7000147E
++:10685000360900400E000188AF6900140A0002C921
++:10686000000000000A00034F000020210641FEFA4C
++:10687000ACA0000C8CAC000C3C0D8000018D902570
++:106880000A0002EAACB2000C000090210A0002C526
++:1068900024130001128000073C028000344B0E00DC
++:1068A0009566000830D300401260004900000000E7
++:1068B0003C0680008CD001780600FFFE34C50E0037
++:1068C00094B500103C03050034CC014032B8FFFF02
++:1068D00003039025AD92000C8CAF0014240D200012
++:1068E0003C041000AD8F00048CAE001CAD8E00087F
++:1068F000A1800012A580001AAD800020A58000189C
++:10690000AD8D0014ACC401780A0003263C0680005B
++:106910008F9F0000351801402692000227F90008D9
++:1069200033281FFFA71200180A000391AF88000048
++:106930003C02800034450140ACA0000C1280001BDA
++:1069400034530E0034510E008E370010ACB70004E3
++:106950008E2400183C0B8000ACA400083570014068
++:1069600024040040A20000128FBF0048A600001AB5
++:106970008FB70044AE0000208FB60040A60000187C
++:106980008FB5003CAE0400148FB400388FB30034D0
++:106990008FB200308FB1002C8FB000283C02100065
++:1069A00027BD005003E00008AD6201788E66001438
++:1069B000ACA600048E64001C0A00042A3C0B800074
++:1069C0000E0001902E9100010A0003200237B0252D
++:1069D000000000000000000D00000000240003691A
++:1069E0000A0004013C06800027BDFFD8AFBF00208D
++:1069F0003C0980003C1F20FFAFB200183C0760003C
++:106A000035320E002402001037F9FFFDACE23008E9
++:106A1000AFB3001CAFB10014AFB00010AE5900000E
++:106A20000000000000000000000000000000000066
++:106A3000000000003C1800FF3713FFFDAE530000BC
++:106A40003C0B60048D7050002411FF7F3C0E00024F
++:106A50000211782435EC380C35CD0109ACED4C1819
++:106A6000240A0009AD6C50008CE80438AD2A0008F7
++:106A7000AD2000148CE54C1C3106FFFF38C42F718B
++:106A800000051E023062000F2486C0B310400007CC
++:106A9000AF8200088CE54C1C3C09001F3528FC0027
++:106AA00000A81824000321C2AF8400048CF1080858
++:106AB0003C0F57092412F0000232702435F0001008
++:106AC00001D0602601CF68262DAA00012D8B000180
++:106AD000014B382550E00009A380000C3C1F601CCE
++:106AE0008FF8000824190001A399000C33137C00CF
++:106AF000A7930010A780000EA380000DAF80004870
++:106B000014C00003AF8000003C066000ACC0442C01
++:106B10000E0005B93C1080000E000F1A361101005E
++:106B20003C12080026523DD03C13080026733E500C
++:106B30008E03000038640001308200011440FFFC25
++:106B40003C0B800A8E2600002407FF8024C90240E7
++:106B5000312A007F014B402101272824AE06002066
++:106B6000AF880044AE0500243C048000AF86003CA2
++:106B70008C8C01780580FFFE24180800922F0008F5
++:106B8000AC980178A38F0042938E004231CD000172
++:106B900011A0000F24050D0024DFF8002FF90301D8
++:106BA0001320001C000629C224A4FFF00004104298
++:106BB000000231400E00020200D2D8213C02400007
++:106BC0003C068000ACC201380A0004A000000000AE
++:106BD00010C50023240D0F0010CD00273C1F800896
++:106BE00037F9008093380000240E0050330F00FF67
++:106BF00015EEFFF33C0240000E000A3600000000D4
++:106C00003C0240003C068000ACC201380A0004A0EF
++:106C1000000000008F83000400A3402B1500000B30
++:106C20008F8B0008006B50212547FFFF00E5482BA4
++:106C30001520000600A36023000C19400E0002027C
++:106C40000073D8210A0004C43C0240000000000D7B
++:106C50000E000202000000000A0004C43C024000D2
++:106C60003C1B0800277B3F500E0002020000000082
++:106C70000A0004C43C0240003C1B0800277B3F7014
++:106C80000E000202000000000A0004C43C024000A2
++:106C90003C0660043C09080025290104ACC9502CBD
++:106CA0008CC850003C0580003C0200023507008083
++:106CB000ACC750003C040800248415A43C03080021
++:106CC0002463155CACA50008ACA2000C3C010800D4
++:106CD000AC243D603C010800AC233D6403E00008A7
++:106CE0002402000100A030213C1C0800279C3D68C4
++:106CF0003C0C04003C0B0002008B3826008C402624
++:106D00002CE200010007502B2D050001000A4880ED
++:106D10003C03080024633D60004520250123182121
++:106D20001080000300001021AC6600002402000166
++:106D300003E00008000000003C1C0800279C3D68A0
++:106D40003C0B04003C0A0002008A3026008B3826E7
++:106D50002CC200010006482B2CE5000100094080F0
++:106D60003C03080024633D600045202501031821F1
++:106D700010800005000010213C0C0800258C155CDB
++:106D8000AC6C00002402000103E0000800000000D9
++:106D90003C0900023C08040000883026008938269F
++:106DA0002CC30001008028212CE400010083102561
++:106DB0001040000B000030213C1C0800279C3D685F
++:106DC0003C0A80008D4E00082406000101CA682597
++:106DD000AD4D00088D4C000C01855825AD4B000CC5
++:106DE00003E0000800C010213C1C0800279C3D68FF
++:106DF0003C0580008CA6000C000420272402000122
++:106E000000C4182403E00008ACA3000C3C020002FC
++:106E10001082000B3C0560003C0704001087000353
++:106E20000000000003E00008000000008CA908D06A
++:106E3000240AFFFD012A402403E00008ACA808D082
++:106E40008CA408D02406FFFE0086182403E0000866
++:106E5000ACA308D03C05601A34A600108CC3008097
++:106E600027BDFFF88CC50084AFA3000093A40000E9
++:106E70002402000110820003AFA5000403E0000813
++:106E800027BD000893A7000114E0001497AC00028E
++:106E900097B800023C0F8000330EFFFC01CF682141
++:106EA000ADA50000A3A000003C0660008CC708D080
++:106EB0002408FFFE3C04601A00E82824ACC508D072
++:106EC0008FA300048FA200003499001027BD000892
++:106ED000AF22008003E00008AF2300843C0B800059
++:106EE000318AFFFC014B48218D2800000A00057DF6
++:106EF000AFA8000427BDFFE8AFBF00103C1C08008E
++:106F0000279C3D683C0580008CA4000C8CA20004EA
++:106F10003C0300020044282410A0000A00A3182407
++:106F20003C0604003C0400021460000900A6102482
++:106F30001440000F3C0404000000000D3C1C08003D
++:106F4000279C3D688FBF001003E0000827BD001894
++:106F50003C0208008C423D600040F809000000003F
++:106F60003C1C0800279C3D680A0005A68FBF001046
++:106F70003C0208008C423D640040F809000000001B
++:106F80000A0005AC00000000000411C003E0000886
++:106F9000244202403C04080024843FB42405001A23
++:106FA0000A00009C0000302127BDFFE0AFB00010B8
++:106FB0003C108000AFBF0018AFB1001436110100C3
++:106FC000922200090E0005B63044007F8E3F00007B
++:106FD0008F89003C3C0F008003E26021258800403F
++:106FE0000049F821240DFF80310E00783198007897
++:106FF00035F9000135F100020319382501D1482582
++:10700000010D302403ED5824018D2824240A00406A
++:1070100024040080240300C0AE0B0024AE0008103E
++:10702000AE0A0814AE040818AE03081CAE05080426
++:10703000AE070820AE060808AE0908243609090084
++:107040009539000C3605098033ED007F3338FFFF9A
++:10705000001889C0AE110800AE0F0828952C000C4E
++:107060008FBF00188FB10014318BFFFF000B51C090
++:10707000AE0A002C8CA400508FB000108CA3003CF2
++:107080008D2700048CA8001C8CA600383C0E800ABA
++:1070900001AE102127BD0020AF820044AF84005014
++:1070A000AF830054AF87004CAF88005C03E000085A
++:1070B000AF8600603C09080091293FD924A800024E
++:1070C0003C05110000093C0000E8302500C51825EA
++:1070D00024820008AC83000003E00008AC800004B8
++:1070E0003C098000352309009128010B906A0011AA
++:1070F0002402002800804821314700FF00A07021B1
++:1071000000C068213108004010E20002340C86DD26
++:10711000240C08003C0A800035420A9A944700007B
++:10712000354B0A9C35460AA030F9FFFFAD39000007
++:107130008D780000354B0A8024040001AD3800042E
++:107140008CCF0000AD2F00089165001930A300031B
++:107150001064009028640002148000AF240500022F
++:107160001065009E240F0003106F00B435450AA47B
++:10717000240A0800118A0048000000005100003D68
++:107180003C0B80003C0480003483090090670012AF
++:1071900030E200FF004D7821000FC8802724000155
++:1071A0003C0A8000354F090091E50019354C0980F3
++:1071B0008D87002830A300FF0003150000475825E5
++:1071C0000004C4003C19600001793025370806FF2F
++:1071D000AD260000AD2800048DEA002C25280028EB
++:1071E000AD2A00088DEC0030AD2C000C8DE500348C
++:1071F000AD2500108DE400383C05800034AC093C1E
++:10720000AD2400148DE3001CAD2300188DE7002091
++:10721000AD27001C8DE20024AD2200208DF900284E
++:1072200034A20100AD3900248D830000AD0E0004AE
++:1072300034B90900AD0300008C47000C250200148E
++:10724000AD070008932B00123C04080090843FD83F
++:10725000AD000010317800FF030D302100064F0013
++:1072600000047C00012F702535CDFFFC03E00008F1
++:10727000AD0D000C35780900930600123C0508009E
++:1072800094A53FC830C800FF010D5021000A60805E
++:107290000A00063C018520211500005B000000006B
++:1072A0003C08080095083FCE3C06080094C63FC83D
++:1072B000010610213C0B800035790900933800113C
++:1072C000932A001935660A80330800FF94CF002AFC
++:1072D00000086082314500FF978A0058000C1E00AC
++:1072E000000524003047FFFF006410250047C0253B
++:1072F00001EA30213C0B4000030B402500066400EE
++:10730000AD280000AD2C0004932500183C030006B6
++:107310002528001400053E0000E31025AD220008DA
++:107320008F24002C3C05800034AC093CAD24000CBB
++:107330008F38001C34A20100254F0001AD38001029
++:107340008D830000AD0E000431EB7FFFAD03000024
++:107350008C47000C34B90900A78B0058AD07000812
++:10736000932B00123C04080090843FD8250200149F
++:10737000317800FF030D302100064F0000047C002F
++:10738000012F702535CDFFFCAD00001003E0000893
++:10739000AD0D000C3C02080094423FD23C050800B1
++:1073A00094A53FC835440AA43C07080094E73FC4AD
++:1073B000948B00000045C8210327C023000B1C004C
++:1073C0002706FFF200665025AD2A000CAD20001004
++:1073D000AD2C00140A00063025290018354F0AA4E8
++:1073E00095E50000956400280005140000043C00A9
++:1073F0003459810000EC5825AD39000CAD2B00103C
++:107400000A000630252900143C0C0800958C3FCE5C
++:107410000A000681258200015460FF56240A0800F4
++:1074200035580AA49706000000061C00006C502581
++:10743000AD2A000C0A000630252900103C03080084
++:1074400094633FD23C07080094E73FC83C0F080014
++:1074500095EF3FC494A4000095790028006710219F
++:10746000004F582300041C00001934002578FFEE5B
++:1074700000D87825346A8100AD2A000CAD2F0010A9
++:10748000AD200014AD2C00180A0006302529001C80
++:1074900003E00008240207D027BDFFE0AFB20018C8
++:1074A000AFB10014AFB00010AFBF001C0E00007CE5
++:1074B000008088218F8800548F87004C3C0580080D
++:1074C00034B20080011128213C1080002402008089
++:1074D000240300C000A72023AE0208183C06800841
++:1074E000AE03081C18800004AF850054ACC500042E
++:1074F0008CC90004AF89004C1220000936040980B1
++:107500000E0006F800000000924C00278E0B00745D
++:1075100001825004014B3021AE46000C3604098034
++:107520008C8E001C8F8F005C01CF682319A0000493
++:107530008FBF001C8C90001CAF90005C8FBF001CA4
++:107540008FB200188FB100148FB000100A00007EB7
++:1075500027BD00208F8600508F8300548F82004CFF
++:107560003C05800834A40080AC860050AC83003C0D
++:1075700003E00008ACA200043C0308008C63005444
++:1075800027BDFFF8308400FF2462000130A500FF12
++:107590003C010800AC22005430C600FF3C078000CC
++:1075A0008CE801780500FFFE3C0C7FFFA3A40003DC
++:1075B0008FAA0000358BFFFF014B4824000627C02F
++:1075C00001244025AFA8000034E201009043000AE6
++:1075D000A3A000023C1980FFA3A300018FAF00000D
++:1075E00030AE007F3738FFFF01F86024000E6E00D8
++:1075F0003C0A002034E50140018D58253549200022
++:107600002406FF803C04100027BD0008ACAB000C32
++:10761000ACA90014A4A00018A0A6001203E0000862
++:10762000ACE40178308800FF30A700FF3C03800005
++:107630008C6201780440FFFE3C0C8000358A0A0011
++:107640008D4B00203584014035850980AC8B0004CA
++:107650008D4900240007302B00061540AC89000836
++:10766000A088001090A3004CA083002D03E0000828
++:10767000A480001827BDFFE8308400FFAFBF0010D2
++:107680000E00075D30A500FF8F8300548FBF0010F0
++:107690003C06800034C50140344700402404FF907C
++:1076A0003C02100027BD0018ACA3000CA0A40012DF
++:1076B000ACA7001403E00008ACC2017827BDFFE0CE
++:1076C0003C088008AFBF001CAFB20018AFB1001477
++:1076D000AFB00010351000808E0600183C07800007
++:1076E000309200FF00C72025AE0400180E00007C79
++:1076F00030B100FF92030005346200080E00007EE6
++:10770000A2020005024020210E000771022028215C
++:10771000024020218FBF001C8FB200188FB10014CF
++:107720008FB0001024050005240600010A0007326E
++:1077300027BD00203C05800034A309809066000826
++:1077400030C200081040000F3C0A01013549080A08
++:10775000AC8900008CA80074AC8800043C070800C9
++:1077600090E73FD830E5001050A00008AC8000083A
++:107770003C0D800835AC00808D8B0058AC8B000828
++:107780002484000C03E00008008010210A0007B5E3
++:107790002484000C27BDFFE83C098000AFB0001036
++:1077A000AFBF00143526098090C8000924020006E6
++:1077B00000A05821310300FF3527090000808021F7
++:1077C000240500041062007B2408000294CF005CB2
++:1077D0003C0E020431EDFFFF01AE6025AE0C00004F
++:1077E00090CA00083144002010800008000000000A
++:1077F00090C2004E3C1F010337F90300305800FFD0
++:107800000319302524050008AE06000490F9001184
++:1078100090E6001290E40011333800FF00187082E7
++:1078200030CF00FF01CF5021014B6821308900FF8C
++:1078300031AAFFFF39230028000A60801460002C61
++:10784000020C482390E400123C198000372F0100FD
++:10785000308C00FF018B1821000310800045F821B7
++:10786000001F8400360706FFAD270004373F0900DC
++:1078700093EC001193EE0012372609800005C082B8
++:107880008DE4000C8CC5003431CD00FF01AB10211C
++:107890000058182100A4F8230008840000033F00CA
++:1078A00000F0302533F9FFFF318F00FC00D970253F
++:1078B0000158202101E9682100045080ADAE000C80
++:1078C0000E00007C012A80213C088008240B000463
++:1078D000350500800E00007EA0AB000902001021DB
++:1078E0008FBF00148FB0001003E0000827BD001800
++:1078F00090EC001190E300193C18080097183FCE57
++:10790000318200FF0002F882307000FF001FCE00BD
++:1079100000103C000327302500D870253C0F4000A4
++:1079200001CF68253C198000AD2D0000373F0900CC
++:1079300093EC001193EE0012372F010037260980D7
++:107940000005C0828DE4000C8CC5003431CD00FFF1
++:1079500001AB10210058182100A4F823000884006E
++:1079600000033F0000F0302533F9FFFF318F00FCAA
++:1079700000D970250158202101E9682100045080B8
++:10798000ADAE000C0E00007C012A80213C0880086E
++:10799000240B0004350500800E00007EA0AB00091A
++:1079A000020010218FBF00148FB0001003E0000808
++:1079B00027BD00180A0007C72408001227BDFFD002
++:1079C0003C038000AFB60028AFB50024AFB4002060
++:1079D000AFB10014AFBF002CAFB3001CAFB20018A2
++:1079E000AFB000103467010090E6000B309400FF48
++:1079F00030B500FF30C200300000B02110400099C7
++:107A000000008821346409809088000800082E0056
++:107A100000051E03046000C0240400048F86005487
++:107A20003C010800A0243FD83C0C8000AD800048F9
++:107A30003C048000348E010091CD000B31A5002064
++:107A400010A000073C078000349309809272000860
++:107A50000012860000107E0305E000C43C1F800871
++:107A600034EC0100918A000B34EB09809169000825
++:107A7000314400400004402B3123000800C8982303
++:107A80001460000224120003000090213C108000CA
++:107A900036180A8036040900970E002C90830011D6
++:107AA0009089001293050018307F00FF312800FFF5
++:107AB000024810210002C880930D0018033F78216E
++:107AC00001F1302130B100FF00D11821A78E0058FC
++:107AD0003C010800A4263FCE3C010800A4233FD06F
++:107AE00015A00002000000000000000D920B010B29
++:107AF0003065FFFF3C010800A4233FD2316A0040FB
++:107B00003C010800A4203FC83C010800A4203FC459
++:107B10001140000224A4000A24A4000B3091FFFFAE
++:107B20000E0001E7022020219206010B3C0C080008
++:107B3000958C3FD2004020210006698231A70001C8
++:107B40000E00060101872821004020210260282123
++:107B50000E00060C024030210E0007A1004020213B
++:107B600016C00069004020219212010B32560040DD
++:107B700012C000053C0500FF8C93000034AEFFFFEF
++:107B8000026E8024AC9000000E0001FB0220202138
++:107B90003C0F080091EF3FD831F10003122000168E
++:107BA0003C1380088F8200543C09800835280080EF
++:107BB000245F0001AD1F003C3C0580088CB9000427
++:107BC00003E02021033FC0231B000002AF9F0054AD
++:107BD0008CA400040E0006F8ACA400043C0780004E
++:107BE0008CEB00743C04800834830080004B5021EF
++:107BF000AC6A000C3C1380083670008002802021A3
++:107C000002A02821A200006B0E00075D3C1480003A
++:107C10008F920054368C0140AD92000C8F86004844
++:107C20003C151000344D000624D60001AF960048E4
++:107C30008FBF002CA18600128FB60028AD8D0014D6
++:107C40008FB3001CAE9501788FB200188FB5002459
++:107C50008FB400208FB100148FB0001003E0000833
++:107C600027BD003034640980908F0008000F760033
++:107C7000000E6E0305A00033347F090093F8001B4B
++:107C8000241900103C010800A0393FD8331300022A
++:107C90001260FF678F8600548F8200601446FF6574
++:107CA0003C0480000E00007C000000003C048008C2
++:107CB0003485008090A8000924060016310300FFD7
++:107CC0001066000D0000000090AB00093C070800A2
++:107CD00090E73FD824090008316400FF34EA00012E
++:107CE0003C010800A02A3FD81089002F240C000A6C
++:107CF000108C00282402000C0E00007E0000000002
++:107D00000A0008608F8600540E0007B9024028213F
++:107D10000A0008AE004020213C0B8008356A008034
++:107D20008D4600548CE9000C1120FF3DAF860054B5
++:107D3000240700143C010800A0273FD80A00085F70
++:107D40003C0C800090910008241200023C010800C5
++:107D5000A0323FD8323000201200000B2416000160
++:107D60008F8600540A0008602411000837F800804C
++:107D70008F020038AFE200048FF90004AF19003C15
++:107D80000A00086C3C0780008F8600540A000860D7
++:107D900024110004A0A200090E00007E00000000D3
++:107DA0000A0008608F860054240200140A00093A71
++:107DB000A0A2000927BDFFE8AFB000103C10800072
++:107DC000AFBF001436020100904400090E00075DA9
++:107DD000240500013C0480089099000E3483008043
++:107DE000909F000F906F00269089000A33F800FFE3
++:107DF00000196E000018740031EC00FF01AE502530
++:107E0000000C5A00014B3825312800FF3603014091
++:107E10003445600000E830252402FF813C04100056
++:107E2000AC66000C8FBF0014AC650014A062001299
++:107E3000AE0401788FB0001003E0000827BD0018E1
++:107E400027BDFFE8308400FFAFBF00100E00075DC4
++:107E500030A500FF3C05800034A4014034470040B9
++:107E60002406FF92AC870014A08600128F83005472
++:107E70008FBF00103C02100027BD0018AC83000C1F
++:107E800003E00008ACA2017827BDFFD8AFB0001016
++:107E9000308400FF30B000FF3C058000AFB100141B
++:107EA000AFBF0020AFB3001CAFB20018000410C277
++:107EB00034A60100320300023051000114600007B3
++:107EC00090D200093C098008353300809268000593
++:107ED0003107000810E0000C308A00100240202119
++:107EE0000E00078302202821240200018FBF0020FA
++:107EF0008FB3001C8FB200188FB100148FB0001028
++:107F000003E0000827BD00281540003434A50A000E
++:107F10008CB800248CAF0008130F004B00003821F0
++:107F20003C0D800835B30080926C00682406000286
++:107F3000318B00FF116600843C06800034C20100D2
++:107F40009263004C90590009307F00FF53F9000400
++:107F50003213007C10E00069000000003213007C46
++:107F60005660005C0240202116200009320D0001FD
++:107F70003C0C800035840100358B0A008D6500249F
++:107F80008C86000414A6FFD900001021320D0001D8
++:107F900011A0000E024020213C1880003710010083
++:107FA0008E0F000C8F8E005011EE000800000000B4
++:107FB0000E000843022028218E19000C3C1F800867
++:107FC00037F00080AE190050024020210E000771EA
++:107FD000022028210A00098F240200013C05080024
++:107FE0008CA5006424A400013C010800AC240064BA
++:107FF0001600000D00000000022028210E0007716D
++:1080000002402021926E0068240C000231CD00FF56
++:1080100011AC0022024020210E00094100000000A6
++:108020000A00098F240200010E00007024040001E0
++:10803000926B0025020B30250E00007EA266002503
++:108040000A0009D3022028218E6200188CDF000468
++:108050008CB9002400021E0217F9FFB13065007FC1
++:108060009268004C264400013093007F1265004066
++:10807000310300FF1464FFAB3C0D8008264700016C
++:1080800030F1007F30E200FF1225000B24070001D1
++:10809000004090210A00099C2411000124050004DD
++:1080A0000E000732240600010E0009410000000006
++:1080B0000A00098F240200012405FF8002452024C4
++:1080C00000859026324200FF004090210A00099C62
++:1080D000241100010E00084302202821320700303D
++:1080E00010E0FFA132100082024020210E00078321
++:1080F000022028210A00098F240200018E6900183D
++:108100000240202102202821012640250E0009647A
++:10811000AE6800189264004C240500032406000198
++:108120000E000732308400FF0E00007024040001AE
++:1081300092710025021150250E00007EA26A0025D2
++:108140000A00098F240200018E6F00183C1880007D
++:108150000240202101F87025022028210E0007711D
++:10816000AE6E00189264004C0A000A1B240500043D
++:10817000324A0080394900801469FF6A3C0D80084A
++:108180000A0009F42647000127BDFFC0AFB0001860
++:108190003C108000AFBF0038AFB70034AFB600303E
++:1081A000AFB5002CAFB40028AFB30024AFB20020AD
++:1081B0000E0005BEAFB1001C360201009045000B59
++:1081C0000E00097690440008144000E78FBF003885
++:1081D0003C08800835070080A0E0006B3606098067
++:1081E00090C50000240300503C17080026F73F907C
++:1081F00030A400FF3C13080026733FA01083000347
++:108200003C1080000000B82100009821241F0010BD
++:108210003611010036120A00361509808E580024E6
++:108220008E3400048EAF00208F8C00543C01080077
++:10823000A03F3FD836190A80972B002C8EF60000FD
++:10824000932A00180298702301EC68233C0108006F
++:10825000AC2E3FB43C010800AC2D3FB83C010800F7
++:10826000AC2C3FDCA78B005802C0F809315400FF4A
++:1082700030490002152000E930420001504000C49E
++:108280009227000992A90008312800081500000271
++:10829000241500030000A8213C0A80003543090092
++:1082A00035440A008C8D00249072001190700012E9
++:1082B000907F0011325900FF321100FF02B11021EE
++:1082C0000002C08033EF00FF0319B021028F70213C
++:1082D00002D4602125CB00103C010800A4363FCE1B
++:1082E0003C010800AC2D3FE03C010800A42C3FD02D
++:1082F0003C010800A42B3FCC3556010035540980C1
++:1083000035510E008F8700548F89005C8E850020C8
++:1083100024080006012730233C010800AC283FD484
++:1083200000A7282304C000B50000902104A000B3DA
++:1083300000C5502B114000B5000000003C010800B2
++:10834000AC263FB88E6200000040F8090000000033
++:108350003046000214C0007400408021304B000100
++:10836000556000118E6200043C0D08008DAD3FBCCD
++:108370003C0EC0003C04800001AE6025AE2C000025
++:108380008C980000330F000811E0FFFD0000000092
++:10839000963F000824120001A79F00408E39000478
++:1083A000AF9900388E6200040040F8090000000018
++:1083B0000202802532030002146000B300000000B6
++:1083C0003C09080095293FC43C06080094C63FD0EC
++:1083D0003C0A0800954A3FC63C0708008CE73FBCB2
++:1083E000012670213C0308008C633FE03C08080034
++:1083F00095083FDA01CA20218ED9000C00E9282116
++:10840000249F000200A878210067C02133E4FFFF09
++:10841000AF9900503C010800AC383FE03C01080037
++:10842000A42F3FC83C010800A42E3FD20E0001E754
++:10843000000000008F8D0048004020213C01080012
++:10844000A02D3FD98E62000825AC0001AF8C0048FA
++:108450000040F809000000008F85005402A0302180
++:108460000E00060C004020210E0007A10040202134
++:108470008E6B000C0160F809004020213C0A0800C6
++:10848000954A3FD23C06080094C63FC601464821A3
++:10849000252800020E0001FB3104FFFF3C05080007
++:1084A0008CA53FB43C0708008CE73FBC00A7202305
++:1084B0003C010800AC243FB414800006000000001A
++:1084C0003C0208008C423FD4344B00403C01080081
++:1084D000AC2B3FD4124000438F8E00448E2D0010F1
++:1084E0008F920044AE4D00208E2C0018AE4C00241C
++:1084F0003C04080094843FC80E0006FA0000000007
++:108500008F9F00548E6700103C010800AC3F3FDC99
++:1085100000E0F809000000003C1908008F393FB462
++:108520001720FF798F870054979300583C11800ED5
++:10853000321601000E000729A633002C16C0004594
++:10854000320300105460004C8EE5000432080040F5
++:108550005500001D8EF000088EE4000C0080F80924
++:10856000000000008FBF00388FB700348FB6003096
++:108570008FB5002C8FB400288FB300248FB2002059
++:108580008FB1001C8FB0001803E0000827BD004029
++:108590008F86003C36110E0000072E0000A6202515
++:1085A000AE0400808E4300208E500024AFA3001044
++:1085B000AE2300148FB20010AE320010AE30001C9B
++:1085C0000A000A75AE3000180200F8090000000029
++:1085D0008EE4000C0080F809000000000A000B2E59
++:1085E0008FBF003824180001240F0001A5C000200F
++:1085F000A5D800220A000B10ADCF00243C010800D2
++:10860000AC203FB80A000AA68E6200003C010800B8
++:10861000AC253FB80A000AA68E6200009224000929
++:108620000E000771000028218FBF00388FB700347B
++:108630008FB600308FB5002C8FB400288FB3002484
++:108640008FB200208FB1001C8FB0001803E000082B
++:1086500027BD00403C1480009295010900002821AC
++:108660000E00084332A400FF320300105060FFB830
++:10867000320800408EE5000400A0F8090000000068
++:108680000A000B28320800405240FFA89793005878
++:108690008E3400148F930044AE7400208E35001C7D
++:1086A000AE7500240A000B1F979300588F820014A8
++:1086B0000004218003E00008008210213C078008AC
++:1086C00034E200809043006900804021106000097E
++:1086D0003C0401003C0708008CE73FDC8F8300303E
++:1086E00000E32023048000089389001C14E30003A6
++:1086F0000100202103E00008008010213C0401005B
++:1087000003E00008008010211120000B00673823CF
++:108710003C0D800035AC0980918B007C316A0002F1
++:10872000114000202409003400E9702B15C0FFF12E
++:108730000100202100E938232403FFFC00A3C82402
++:1087400000E3C02400F9782B15E0FFEA030820219C
++:1087500030C400030004102314C000143049000387
++:108760000000302100A9782101E6702100EE682B7D
++:1087700011A0FFE03C0401002D3800010006C82BC9
++:10878000010548210319382414E0FFDA2524FFFCF1
++:108790002402FFFC00A218240068202103E0000846
++:1087A000008010210A000B9E240900303C0C800040
++:1087B0003586098090CB007C316A00041540FFE9C2
++:1087C000240600040A000BAD000030213C03080021
++:1087D0008C63005C8F82001827BDFFE0AFBF0018DC
++:1087E000AFB1001410620005AFB00010000329C043
++:1087F00024A40280AF840014AF8300183C108000D2
++:1088000036020A0094450032361101000E000B7F3B
++:1088100030A43FFF8E240000241FFF803C11008005
++:108820000082C021031F60243309007F000CC9406F
++:1088300003294025330E0078362F00033C0D10002D
++:10884000010D502501CF5825AE0C002836080980AF
++:10885000AE0C080CAE0B082CAE0A08309103006970
++:108860003C06800C0126382110600006AF870034DA
++:108870008D09003C8D03006C0123382318E0008231
++:10888000000000003C0B8008356A00803C1080002E
++:10889000A1400069360609808CC200383C06800081
++:1088A00034C50A0090A8003C310C00201180001A49
++:1088B000AF820030240D00013C0E800035D10A004B
++:1088C000A38D001CAF8000248E2400248F850024FB
++:1088D000240D0008AF800020AF8000283C01080074
++:1088E000A42D3FC63C010800A4203FDA0E000B83F4
++:1088F000000030219228003C8FBF00188FB1001477
++:108900008FB0001000086142AF82002C27BD00200C
++:1089100003E000083182000190B80032240E00010B
++:10892000330F00FF000F2182108E00412419000236
++:108930001099006434C40AC03C03800034640A0007
++:108940008C8F002415E0001E34660900909F0030D3
++:108950002418000533F9003F1338004E24030001AA
++:108960008F860020A383001CAF860028AF860024DA
++:108970003C0E800035D10A008E2400248F8500240F
++:10898000240D00083C010800A42D3FC63C0108004E
++:10899000A4203FDA0E000B83000000009228003C68
++:1089A0008FBF00188FB100148FB000100008614213
++:1089B000AF82002C27BD002003E0000831820001B7
++:1089C0008C8A00088C8B00248CD000643C0E8000C4
++:1089D00035D10A00014B2823AF900024A380001C4E
++:1089E000AF8500288E2400248F8600208F850024E8
++:1089F000240D00083C010800A42D3FC63C010800DE
++:108A0000A4203FDA0E000B83000000009228003CF7
++:108A10008FBF00188FB100148FB0001000086142A2
++:108A2000AF82002C27BD002003E000083182000146
++:108A300090A200303051003F5224002834C50AC0B3
++:108A40008CB000241600002234CB09008CA600480C
++:108A50003C0A7FFF3545FFFF00C510243C0E800017
++:108A6000AF82002035C509008F8800208CAD0060E2
++:108A7000010D602B15800002010020218CA40060F4
++:108A80000A000C22AF8400208D02006C0A000BFC4F
++:108A90003C0680008C8200488F8600203C097FFFC6
++:108AA0003527FFFF004788243C0480082403000189
++:108AB000AF910028AC80006CA383001C0A000C302E
++:108AC000AF8600248C9F00140A000C22AF9F002068
++:108AD0008D6200680A000C6C3C0E800034C4098072
++:108AE0008C8900708CA300140123382B10E0000443
++:108AF000000000008C8200700A000C6C3C0E8000AC
++:108B00008CA200140A000C6C3C0E80008F8500249F
++:108B100027BDFFE0AFBF0018AFB1001414A00008DC
++:108B2000AFB000103C04800034870A0090E60030AB
++:108B30002402000530C3003F106200B934840900EC
++:108B40008F91002000A080213C048000348E0A0018
++:108B50008DCD00043C0608008CC63FB831A73FFF0E
++:108B600000E6602B5580000100E03021938F001C4F
++:108B700011E0007800D0282B349F098093F9007C05
++:108B800033380002130000792403003400C3102B93
++:108B9000144000D90000000000C3302300D0282B6F
++:108BA0003C010800A4233FC414A0006E0200182159
++:108BB0003C0408008C843FB40064402B5500000145
++:108BC000006020213C05800034A90A00912A003C65
++:108BD0003C010800AC243FBC31430020146000037A
++:108BE0000000482134AB0E008D6900188F88002CDE
++:108BF0000128202B1080005F000000003C050800C9
++:108C00008CA53FBC00A96821010D602B1180005C80
++:108C100000B0702B0109382300E028213C01080036
++:108C2000AC273FBC12000003240AFFFC10B0008DEB
++:108C30003224000300AA18243C010800A4203FDAD3
++:108C40003C010800AC233FBC006028218F84002435
++:108C5000120400063C0B80088D6C006C0200202181
++:108C6000AF91002025900001AD70006C8F8D002821
++:108C700000858823AF91002401A52023AF8400281C
++:108C80001220000224070018240700103C18800856
++:108C90003706008090CF00683C010800A0273FD82D
++:108CA0002407000131EE00FF11C70047000000005B
++:108CB00014800018000028213C06800034D109806F
++:108CC00034CD010091A600098E2C001824C40001A7
++:108CD000000C86023205007F308B007F1165007F1B
++:108CE0002407FF803C19800837290080A124004C0C
++:108CF0003C0808008D083FD4241800023C010800FD
++:108D0000A0384019350F00083C010800AC2F3FD4B3
++:108D1000240500103C02800034440A009083003C8B
++:108D2000307F002013E0000500A02021240A00016C
++:108D30003C010800AC2A3FBC34A400018FBF0018DE
++:108D40008FB100148FB000100080102103E00008E4
++:108D500027BD00203C010800A4203FC410A0FF94C0
++:108D6000020018210A000CC000C018210A000CB72C
++:108D7000240300303C0508008CA53FBC00B0702BDC
++:108D800011C0FFA8000000003C19080097393FC43B
++:108D90000325C0210307782B11E000072CAA00044B
++:108DA0003C0360008C625404305F003F17E0FFE337
++:108DB000240400422CAA00041140FF9A240400421B
++:108DC0000A000D248FBF00181528FFB9000000000D
++:108DD0008CCA00183C1F800024020002015F182585
++:108DE000ACC3001837F90A00A0C200689329003C00
++:108DF0002404000400A01021312800203C010800B8
++:108E0000A0244019110000022405001024020001D2
++:108E10003C010800AC223FB40A000D1A3C0280005D
++:108E20008F8800288C8900600109282B14A000027B
++:108E3000010088218C9100603C048000348B0E007E
++:108E40008D640018240A000102202821022030210C
++:108E5000A38A001C0E000B83022080210A000CA6AE
++:108E6000AF82002C00045823122000073164000355
++:108E70003C0E800035C7098090ED007C31AC0004C9
++:108E800015800019248F00043C010800A4243FDA57
++:108E90003C1F080097FF3FDA03E5C82100D9C02B2B
++:108EA0001300FF6B8F8400242CA6000514C0FFA3C1
++:108EB0002404004230A200031440000200A2182340
++:108EC00024A3FFFC3C010800AC233FBC3C0108008C
++:108ED000A4203FDA0A000CE70060282100C77024B4
++:108EE0000A000D0D01C720263C010800A42F3FDA1F
++:108EF0000A000D78000000003C010800AC203FBCD7
++:108F00000A000D23240400428F8300283C058000C2
++:108F100034AA0A00146000060000102191470030B6
++:108F20002406000530E400FF108600030000000066
++:108F300003E0000800000000914B0048316900FF89
++:108F4000000941C21500FFFA3C0680083C040800F5
++:108F500094843FC43C0308008C633FDC3C19080048
++:108F60008F393FBC3C0F080095EF3FDA0064C02109
++:108F70008CCD00040319702101CF602134AB0E00A9
++:108F8000018D282318A0001D00000000914F004C07
++:108F90008F8C0034956D001031EE00FF8D89000438
++:108FA00001AE30238D8A000030CEFFFF000E290075
++:108FB0000125C82100003821014720210325182B55
++:108FC0000083C021AD990004AD980000918F000A84
++:108FD00001CF6821A18D000A956500128F8A0034A7
++:108FE000A5450008954B003825690001A5490038C2
++:108FF0009148000D35070008A147000D03E0000867
++:109000000000000027BDFFD8AFB000189388001CF7
++:109010008FB000143C0A80003C197FFF8F8700242A
++:109020003738FFFFAFBF0020AFB1001C355F0A002B
++:109030000218182493EB003C00087FC03C02BFFFDD
++:10904000006F60252CF000013449FFFF3C1F080031
++:109050008FFF3FDC8F9900303C18080097183FD2F3
++:1090600001897824001047803C07EFFF3C05F0FFA2
++:1090700001E818253C1180003169002034E2FFFF2F
++:1090800034ADFFFF362E098027A50010240600020C
++:1090900003F96023270B0002354A0E0000621824F2
++:1090A0000080802115200002000040218D48001C16
++:1090B000A7AB0012058000392407000030E800FF4C
++:1090C00000083F00006758253C028008AFAB001441
++:1090D000344F008091EA00683C08080091083FD9AD
++:1090E0003C09DFFF352CFFFF000AF82B3C0208008B
++:1090F00094423FCCA3A80011016CC024001FCF40B4
++:10910000031918258FA70010AFA300143C0C08000A
++:10911000918C3FDBA7A200168FAB001400ED482412
++:109120003C0F01003C0A0FFF012FC82531980003B6
++:10913000355FFFFF016D40243C027000033F38247F
++:1091400000181E0000E2482501037825AFAF001487
++:10915000AFA9001091CC007C0E000092A3AC0015CA
++:10916000362D0A0091A6003C30C400201080000675
++:10917000260200083C11080096313FC8262EFFFF4A
++:109180003C010800A42E3FC88FBF00208FB1001CF7
++:109190008FB0001803E0000827BD00288F8B002C3B
++:1091A000010B502B5540FFC5240700010A000E0497
++:1091B00030E800FF9383001C3C02800027BDFFD8ED
++:1091C00034480A0000805021AFBF002034460AC056
++:1091D000010028211060000E3444098091070030FE
++:1091E000240B00058F89002030EC003F118B000B11
++:1091F00000003821AFA900103C0B80088D69006C7D
++:10920000AFAA00180E00015AAFA90014A380001CD9
++:109210008FBF002003E0000827BD00288D1F0048F5
++:109220003C1808008F183FBC8F9900283C027FFF34
++:109230008D0800443443FFFFAFA900103C0B8008A9
++:109240008D69006C03E370240319782101CF682332
++:1092500001A83821AFAA00180E00015AAFA90014C6
++:109260000A000E58A380001C3C05800034A60A00AA
++:1092700090C7003C3C06080094C63FDA3C02080058
++:109280008C423FD430E30020000624001060001E12
++:10929000004438253C0880083505008090A300680C
++:1092A00000004821240800010000282124040001B6
++:1092B0003C0680008CCD017805A0FFFE34CF014034
++:1092C000ADE800083C0208008C423FDCA5E5000444
++:1092D000A5E40006ADE2000C3C04080090843FD9F0
++:1092E0003C03800834790080A1E40012ADE700144B
++:1092F000A5E900189338004C3C0E1000A1F8002D91
++:1093000003E00008ACCE017834A90E008D28001CC3
++:109310003C0C08008D8C3FBC952B0016952A001440
++:10932000018648213164FFFF0A000E803145FFFFAE
++:109330003C04800034830A009065003C30A2002089
++:109340001040001934870E00000040210000382131
++:10935000000020213C0680008CC901780520FFFE1A
++:1093600034CA014034CF010091EB0009AD48000838
++:109370003C0E08008DCE3FDC240DFF91240C0040F4
++:109380003C081000A5440004A5470006AD4E000CA3
++:10939000A14D0012AD4C0014A5400018A14B002DAA
++:1093A00003E00008ACC801788CE8001894E60012CD
++:1093B00094E4001030C7FFFF0A000EA93084FFFFBD
++:1093C0003C04800034830A009065003C30A20020F9
++:1093D0001040002727BDFFF82409000100003821B4
++:1093E000240800013C0680008CCA01780540FFFE7D
++:1093F0003C0280FF34C40100908D00093C0C080041
++:10940000918C4019A3AD00038FAB00003185007F24
++:109410003459FFFF01665025AFAA00009083000A6F
++:10942000A3A0000200057E00A3A300018FB80000E6
++:1094300034CB0140240C30000319702401CF68257F
++:10944000AD6D000C27BD0008AD6C0014A5600018C0
++:10945000AD690008A56700042409FF80A56800061F
++:109460003C081000A169001203E00008ACC80178B4
++:1094700034870E008CE9001894E6001294E4001082
++:1094800030C8FFFF0A000ECD3087FFFF27BDFFE089
++:10949000AFB100143C118000AFB00010AFBF001896
++:1094A00036380A00970F0032363001000E000B7F6D
++:1094B00031E43FFF8E0E0000240DFF803C042000AD
++:1094C00001C25821016D6024000C4940316A007FBF
++:1094D000012A4025010438253C048008AE270830C5
++:1094E0003486008090C500682403000230A200FF8B
++:1094F000104300048F9F00208F990024AC9F0068C8
++:10950000AC9900648FBF00188FB100148FB00010A9
++:1095100003E0000827BD00203C0A0800254A3A80E5
++:109520003C09080025293B103C08080025082F1C91
++:109530003C07080024E73BDC3C06080024C639044D
++:109540003C05080024A536583C0408002484325CFD
++:109550003C030800246339B83C0208002442375415
++:109560003C010800AC2A3F983C010800AC293F941C
++:109570003C010800AC283F903C010800AC273F9C10
++:109580003C010800AC263FAC3C010800AC253FA4E0
++:109590003C010800AC243FA03C010800AC233FB0D4
++:1095A0003C010800AC223FA803E0000800000000D6
++:1095B00080000940800009008008010080080080C8
++:1095C00080080000800E00008008008080080000F5
++:1095D00080000A8080000A00800009808000090065
++:00000001FF
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index 6181ad7..86db022 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -121,7 +121,7 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
+       if (ret == 0)
+               return ret;
+-      ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
++      ret = read_cache_pages(mapping, pages, v9fs_vfs_readpage, filp);
+       p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
+       return ret;
+ }
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index eeabcb0..cea07b5 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -179,7 +179,7 @@ static int v9fs_mapped_dotl_flags(int flags)
+ {
+       int i;
+       int rflags = 0;
+-      struct dotl_openflag_map dotl_oflag_map[] = {
++      static const struct dotl_openflag_map dotl_oflag_map[] = {
+               { O_CREAT,      P9_DOTL_CREATE },
+               { O_EXCL,       P9_DOTL_EXCL },
+               { O_NOCTTY,     P9_DOTL_NOCTTY },
+@@ -524,7 +524,7 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
+ {
+       int i;
+       int p9_iattr_valid = 0;
+-      struct dotl_iattr_map dotl_iattr_map[] = {
++      static const struct dotl_iattr_map dotl_iattr_map[] = {
+               { ATTR_MODE,            P9_ATTR_MODE },
+               { ATTR_UID,             P9_ATTR_UID },
+               { ATTR_GID,             P9_ATTR_GID },
+diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
+index c7efddf..fa601ee 100644
+--- a/fs/Kconfig.binfmt
++++ b/fs/Kconfig.binfmt
+@@ -112,7 +112,7 @@ config HAVE_AOUT
+ config BINFMT_AOUT
+       tristate "Kernel support for a.out and ECOFF binaries"
+-      depends on HAVE_AOUT
++      depends on HAVE_AOUT && BROKEN
+       ---help---
+         A.out (Assembler.OUTput) is a set of formats for libraries and
+         executables used in the earliest versions of UNIX.  Linux used
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 6344aee..217c579 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -122,11 +122,11 @@ static void afs_file_readpage_read_complete(struct page *page,
+ /*
+  * read page from file, directory or symlink, given a key to use
+  */
+-int afs_page_filler(void *data, struct page *page)
++int afs_page_filler(struct file *data, struct page *page)
+ {
+       struct inode *inode = page->mapping->host;
+       struct afs_vnode *vnode = AFS_FS_I(inode);
+-      struct key *key = data;
++      struct key *key = (struct key *)data;
+       size_t len;
+       off_t offset;
+       int ret;
+@@ -220,14 +220,14 @@ static int afs_readpage(struct file *file, struct page *page)
+       if (file) {
+               key = file->private_data;
+               ASSERT(key != NULL);
+-              ret = afs_page_filler(key, page);
++              ret = afs_page_filler((struct file *)key, page);
+       } else {
+               struct inode *inode = page->mapping->host;
+               key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
+               if (IS_ERR(key)) {
+                       ret = PTR_ERR(key);
+               } else {
+-                      ret = afs_page_filler(key, page);
++                      ret = afs_page_filler((struct file *)key, page);
+                       key_put(key);
+               }
+       }
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 86cc726..b9b7f73 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -142,7 +142,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+       struct afs_vnode *vnode;
+       struct super_block *sb;
+       struct inode *inode;
+-      static atomic_t afs_autocell_ino;
++      static atomic_unchecked_t afs_autocell_ino;
+       _enter("{%x:%u},%*.*s,",
+              AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
+@@ -155,7 +155,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+       data.fid.unique = 0;
+       data.fid.vnode = 0;
+-      inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
++      inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
+                            afs_iget5_autocell_test, afs_iget5_set,
+                            &data);
+       if (!inode) {
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index df976b2..fcafd44 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -15,7 +15,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/skbuff.h>
+ #include <linux/rxrpc.h>
+-#include <linux/key.h>
++#include <linux/key-type.h>
+ #include <linux/workqueue.h>
+ #include <linux/sched.h>
+ #include <linux/fscache.h>
+@@ -498,7 +498,7 @@ extern const struct file_operations afs_file_operations;
+ extern int afs_open(struct inode *, struct file *);
+ extern int afs_release(struct inode *, struct file *);
+-extern int afs_page_filler(void *, struct page *);
++extern int afs_page_filler(struct file *, struct page *);
+ /*
+  * flock.c
+diff --git a/fs/aio.c b/fs/aio.c
+index 4fe81d1..85f39a0 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -455,7 +455,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+       size += sizeof(struct io_event) * nr_events;
+       nr_pages = PFN_UP(size);
+-      if (nr_pages < 0)
++      if (nr_pages <= 0)
+               return -EINVAL;
+       file = aio_private_file(ctx, nr_pages);
+diff --git a/fs/attr.c b/fs/attr.c
+index 3c42cab..3e01da6 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -102,6 +102,10 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
+               unsigned long limit;
+               limit = rlimit(RLIMIT_FSIZE);
++              if (offset > ULONG_MAX)
++                      gr_learn_resource(current, RLIMIT_FSIZE, ULONG_MAX, 1);
++              else if (offset > 0)
++                      gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
+               if (limit != RLIM_INFINITY && offset > limit)
+                       goto out_sig;
+               if (offset > inode->i_sb->s_maxbytes)
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index e44271d..0fdc215 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -56,7 +56,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
+ {
+       unsigned long sigpipe, flags;
+       mm_segment_t fs;
+-      const char *data = (const char *)addr;
++      const char __user *data = (const char __force_user *)addr;
+       ssize_t wr = 0;
+       sigpipe = sigismember(&current->pending.signal, SIGPIPE);
+@@ -344,6 +344,10 @@ static int validate_request(struct autofs_wait_queue **wait,
+       return 1;
+ }
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
++#endif
++
+ int autofs4_wait(struct autofs_sb_info *sbi,
+                struct dentry *dentry, enum autofs_notify notify)
+ {
+@@ -389,7 +393,12 @@ int autofs4_wait(struct autofs_sb_info *sbi,
+       /* If this is a direct mount request create a dummy name */
+       if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              /* this name does get written to userland via autofs4_write() */
++              qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
++#else
+               qstr.len = sprintf(name, "%p", dentry);
++#endif
+       else {
+               qstr.len = autofs4_getpath(sbi, dentry, &name);
+               if (!qstr.len) {
+diff --git a/fs/befs/endian.h b/fs/befs/endian.h
+index 2722387..56059b5 100644
+--- a/fs/befs/endian.h
++++ b/fs/befs/endian.h
+@@ -11,7 +11,7 @@
+ #include <asm/byteorder.h>
+-static inline u64
++static inline u64 __intentional_overflow(-1)
+ fs64_to_cpu(const struct super_block *sb, fs64 n)
+ {
+       if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
+               return (__force fs64)cpu_to_be64(n);
+ }
+-static inline u32
++static inline u32 __intentional_overflow(-1)
+ fs32_to_cpu(const struct super_block *sb, fs32 n)
+ {
+       if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
+               return (__force fs32)cpu_to_be32(n);
+ }
+-static inline u16
++static inline u16 __intentional_overflow(-1)
+ fs16_to_cpu(const struct super_block *sb, fs16 n)
+ {
+       if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
+index 7da05b1..9df0a29 100644
+--- a/fs/befs/linuxvfs.c
++++ b/fs/befs/linuxvfs.c
+@@ -431,10 +431,12 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
+ static int __init
+ befs_init_inodecache(void)
+ {
+-      befs_inode_cachep = kmem_cache_create("befs_inode_cache",
++      befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache",
+                                             sizeof (struct befs_inode_info),
+                                             0, (SLAB_RECLAIM_ACCOUNT|
+                                               SLAB_MEM_SPREAD|SLAB_ACCOUNT),
++                                            offsetof(struct befs_inode_info, i_data.symlink),
++                                            sizeof(((struct befs_inode_info *)0)->i_data.symlink),
+                                             init_once);
+       if (befs_inode_cachep == NULL) {
+               pr_err("%s: Couldn't initialize inode slabcache\n", __func__);
+diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
+index ae1b540..15cfacf 100644
+--- a/fs/binfmt_aout.c
++++ b/fs/binfmt_aout.c
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/ptrace.h>
+@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
+ #endif
+ #       define START_STACK(u)   ((void __user *)u.start_stack)
++      memset(&dump, 0, sizeof(dump));
++
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       has_dumped = 1;
+@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+    if we wrote the stack, but not the data area.  */
++      gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
+       if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
+               dump.u_dsize = 0;
+ /* Make sure we have enough room to write the stack and data areas. */
++      gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
+       if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
+               dump.u_ssize = 0;
+@@ -228,6 +233,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
+       rlim = rlimit(RLIMIT_DATA);
+       if (rlim >= RLIM_INFINITY)
+               rlim = ~0;
++
++      gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+       if (ex.a_data + ex.a_bss > rlim)
+               return -ENOMEM;
+@@ -257,6 +264,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
+       install_exec_creds(bprm);
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++              current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++              if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++                      current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++                      current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++      }
++#endif
++
+       if (N_MAGIC(ex) == OMAGIC) {
+               unsigned long text_addr, map_size;
+               loff_t pos;
+@@ -311,7 +339,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
+                       return error;
+               error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+-                              PROT_READ | PROT_WRITE | PROT_EXEC,
++                              PROT_READ | PROT_WRITE,
+                               MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+                               fd_offset + ex.a_text);
+               if (error != N_DATADDR(ex))
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index e5495f3..641d63f 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -36,6 +36,7 @@
+ #include <linux/coredump.h>
+ #include <linux/sched.h>
+ #include <linux/dax.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -67,6 +68,14 @@ static int elf_core_dump(struct coredump_params *cprm);
+ #define elf_core_dump NULL
+ #endif
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++static void elf_handle_mmap(struct file *file);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -86,6 +95,15 @@ static struct linux_binfmt elf_format = {
+       .load_binary    = load_elf_binary,
+       .load_shlib     = load_elf_library,
+       .core_dump      = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++      .handle_mprotect= elf_handle_mprotect,
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      .handle_mmap    = elf_handle_mmap,
++#endif
++
+       .min_coredump   = ELF_EXEC_PAGESIZE,
+ };
+@@ -93,6 +111,8 @@ static struct linux_binfmt elf_format = {
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++      unsigned long e = end;
++
+       start = ELF_PAGEALIGN(start);
+       end = ELF_PAGEALIGN(end);
+       if (end > start) {
+@@ -100,7 +120,7 @@ static int set_brk(unsigned long start, unsigned long end)
+               if (error)
+                       return error;
+       }
+-      current->mm->start_brk = current->mm->brk = end;
++      current->mm->start_brk = current->mm->brk = e;
+       return 0;
+ }
+@@ -161,7 +181,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+       elf_addr_t __user *u_rand_bytes;
+       const char *k_platform = ELF_PLATFORM;
+       const char *k_base_platform = ELF_BASE_PLATFORM;
+-      unsigned char k_rand_bytes[16];
++      u32 k_rand_bytes[4];
+       int items;
+       elf_addr_t *elf_info;
+       int ei_index = 0;
+@@ -208,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+        * Generate 16 random bytes for userspace PRNG seeding.
+        */
+       get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+-      u_rand_bytes = (elf_addr_t __user *)
+-                     STACK_ALLOC(p, sizeof(k_rand_bytes));
++      prandom_seed(k_rand_bytes[0] ^ prandom_u32());
++      prandom_seed(k_rand_bytes[1] ^ prandom_u32());
++      prandom_seed(k_rand_bytes[2] ^ prandom_u32());
++      prandom_seed(k_rand_bytes[3] ^ prandom_u32());
++      p = STACK_ROUND(p, sizeof(k_rand_bytes));
++      u_rand_bytes = (elf_addr_t __user *) p;
+       if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+               return -EFAULT;
+@@ -517,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
+    an ELF header */
+ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+-              struct file *interpreter, unsigned long *interp_map_addr,
++              struct file *interpreter,
+               unsigned long no_base, struct elf_phdr *interp_elf_phdata)
+ {
+       struct elf_phdr *eppnt;
+-      unsigned long load_addr = 0;
++      unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+       int load_addr_set = 0;
+       unsigned long last_bss = 0, elf_bss = 0;
+-      unsigned long error = ~0UL;
++      unsigned long error = -EINVAL;
+       unsigned long total_size;
+       int i;
+@@ -544,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+               goto out;
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+       eppnt = interp_elf_phdata;
+       for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+               if (eppnt->p_type == PT_LOAD) {
+@@ -567,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+                       map_addr = elf_map(interpreter, load_addr + vaddr,
+                                       eppnt, elf_prot, elf_type, total_size);
+                       total_size = 0;
+-                      if (!*interp_map_addr)
+-                              *interp_map_addr = map_addr;
+                       error = map_addr;
+                       if (BAD_ADDR(map_addr))
+                               goto out;
+@@ -587,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+                       k = load_addr + eppnt->p_vaddr;
+                       if (BAD_ADDR(k) ||
+                           eppnt->p_filesz > eppnt->p_memsz ||
+-                          eppnt->p_memsz > TASK_SIZE ||
+-                          TASK_SIZE - eppnt->p_memsz < k) {
++                          eppnt->p_memsz > pax_task_size ||
++                          pax_task_size - eppnt->p_memsz < k) {
+                               error = -ENOMEM;
+                               goto out;
+                       }
+@@ -639,6 +666,336 @@ out:
+       return error;
+ }
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++#ifdef CONFIG_PAX_SOFTMODE
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (elf_phdata->p_flags & PF_PAGEEXEC)
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (elf_phdata->p_flags & PF_SEGMEXEC)
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (elf_phdata->p_flags & PF_MPROTECT)
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++#ifdef CONFIG_PAX_SOFTMODE
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (pax_flags_softmode & MF_PAX_MPROTECT)
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++      if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static unsigned long pax_parse_defaults(void)
++{
++      unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++      if (pax_softmode)
++              return pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (randomize_va_space)
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++      return pax_flags;
++}
++
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++      unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
++
++#ifdef CONFIG_PAX_EI_PAX
++
++#ifdef CONFIG_PAX_SOFTMODE
++      if (pax_softmode)
++              return pax_flags;
++#endif
++
++      pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++      if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++              pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++              pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++      if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++              pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++      if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++              pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++              pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++      return pax_flags;
++
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++      unsigned long i;
++
++      for (i = 0UL; i < elf_ex->e_phnum; i++)
++              if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++                      if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++                          ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++                          ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++                          ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++                          ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++                              return PAX_PARSE_FLAGS_FALLBACK;
++
++#ifdef CONFIG_PAX_SOFTMODE
++                      if (pax_softmode)
++                              return pax_parse_pt_pax_softmode(&elf_phdata[i]);
++                      else
++#endif
++
++                              return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
++                      break;
++              }
++#endif
++
++      return PAX_PARSE_FLAGS_FALLBACK;
++}
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++      ssize_t xattr_size, i;
++      unsigned char xattr_value[sizeof("pemrs") - 1];
++      unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++      xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
++      if (xattr_size < 0 || xattr_size > sizeof xattr_value)
++              return PAX_PARSE_FLAGS_FALLBACK;
++
++      for (i = 0; i < xattr_size; i++)
++              switch (xattr_value[i]) {
++              default:
++                      return PAX_PARSE_FLAGS_FALLBACK;
++
++#define parse_flag(option1, option2, flag)                    \
++              case option1:                                   \
++                      if (pax_flags_hardmode & MF_PAX_##flag) \
++                              return PAX_PARSE_FLAGS_FALLBACK;\
++                      pax_flags_hardmode |= MF_PAX_##flag;    \
++                      break;                                  \
++              case option2:                                   \
++                      if (pax_flags_softmode & MF_PAX_##flag) \
++                              return PAX_PARSE_FLAGS_FALLBACK;\
++                      pax_flags_softmode |= MF_PAX_##flag;    \
++                      break;
++
++              parse_flag('p', 'P', PAGEEXEC);
++              parse_flag('e', 'E', EMUTRAMP);
++              parse_flag('m', 'M', MPROTECT);
++              parse_flag('r', 'R', RANDMMAP);
++              parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++              }
++
++      if (pax_flags_hardmode & pax_flags_softmode)
++              return PAX_PARSE_FLAGS_FALLBACK;
++
++#ifdef CONFIG_PAX_SOFTMODE
++      if (pax_softmode)
++              return pax_parse_xattr_pax_softmode(pax_flags_softmode);
++      else
++#endif
++
++              return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++      return PAX_PARSE_FLAGS_FALLBACK;
++#endif
++
++}
++
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++      unsigned long pax_flags, ei_pax_flags,  pt_pax_flags, xattr_pax_flags;
++
++      pax_flags = pax_parse_defaults();
++      ei_pax_flags = pax_parse_ei_pax(elf_ex);
++      pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++      xattr_pax_flags = pax_parse_xattr_pax(file);
++
++      if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
++          xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
++          pt_pax_flags != xattr_pax_flags)
++              return -EINVAL;
++      if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
++              pax_flags = xattr_pax_flags;
++      else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
++              pax_flags = pt_pax_flags;
++      else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
++              pax_flags = ei_pax_flags;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++      if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              if ((__supported_pte_mask & _PAGE_NX))
++                      pax_flags &= ~MF_PAX_SEGMEXEC;
++              else
++                      pax_flags &= ~MF_PAX_PAGEEXEC;
++      }
++#endif
++
++      if (0 > pax_check_flags(&pax_flags))
++              return -EINVAL;
++
++      current->mm->pax_flags = pax_flags;
++      return 0;
++}
++#endif
++
+ /*
+  * These are the functions used to load ELF style executables and shared
+  * libraries.  There is no binary dependent code anywhere else.
+@@ -652,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+       unsigned long random_variable = 0;
++#ifdef CONFIG_PAX_RANDUSTACK
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++              return stack_top - current->mm->delta_stack;
++#endif
++
+       if ((current->flags & PF_RANDOMIZE) &&
+               !(current->personality & ADDR_NO_RANDOMIZE)) {
+               random_variable = get_random_long();
+@@ -671,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       unsigned long load_addr = 0, load_bias = 0;
+       int load_addr_set = 0;
+       char * elf_interpreter = NULL;
+-      unsigned long error;
++      unsigned long error = 0;
+       struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
+       unsigned long elf_bss, elf_brk;
+       int retval, i;
+@@ -686,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+               struct elfhdr interp_elf_ex;
+       } *loc;
+       struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
++      unsigned long pax_task_size;
+       loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+       if (!loc) {
+@@ -846,6 +1209,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+          may depend on the personality.  */
+       SET_PERSONALITY2(loc->elf_ex, &arch_state);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++      current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++      current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      current->mm->delta_mmap = 0UL;
++      current->mm->delta_stack = 0UL;
++#endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
++              retval = -EINVAL;
++              goto out_free_dentry;
++      }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++      pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++      if (pax_set_initial_flags_func)
++              (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++      if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
++              current->mm->context.user_cs_limit = PAGE_SIZE;
++              current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
++      }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++              current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++              current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++              current->mm->def_flags |= VM_NOHUGEPAGE;
++      } else
++#endif
++
++      pax_task_size = TASK_SIZE;
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++      if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++              put_cpu();
++      }
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++              current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++              current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++      }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              executable_stack = EXSTACK_DISABLE_X;
++              current->personality &= ~READ_IMPLIES_EXEC;
++      } else
++#endif
++
+       if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+               current->personality |= READ_IMPLIES_EXEC;
+@@ -922,8 +1356,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
+                       if (current->flags & PF_RANDOMIZE)
+                               load_bias += arch_mmap_rnd();
+                       load_bias = ELF_PAGESTART(load_bias);
+-                      total_size = total_mapping_size(elf_phdata,
+-                                                      loc->elf_ex.e_phnum);
++
++#ifdef CONFIG_PAX_RANDMMAP
++                      /* PaX: randomize base address at the default exe base if requested */
++                      if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++                              load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++                              load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++                              load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++                              elf_flags |= MAP_FIXED;
++                      }
++#endif
++
++                      total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
+                       if (!total_size) {
+                               retval = -EINVAL;
+                               goto out_free_dentry;
+@@ -959,9 +1406,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+                * allowed task size. Note that p_filesz must always be
+                * <= p_memsz so it is only necessary to check p_memsz.
+                */
+-              if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+-                  elf_ppnt->p_memsz > TASK_SIZE ||
+-                  TASK_SIZE - elf_ppnt->p_memsz < k) {
++              if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++                  elf_ppnt->p_memsz > pax_task_size ||
++                  pax_task_size - elf_ppnt->p_memsz < k) {
+                       /* set_brk can never work. Avoid overflows. */
+                       retval = -EINVAL;
+                       goto out_free_dentry;
+@@ -997,16 +1444,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       if (retval)
+               goto out_free_dentry;
+       if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+-              retval = -EFAULT; /* Nobody gets to see this, but.. */
+-              goto out_free_dentry;
++              /*
++               * This bss-zeroing can fail if the ELF
++               * file specifies odd protections. So
++               * we don't check the return value
++               */
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++              unsigned long start, size, flags;
++              vm_flags_t vm_flags;
++
++              start = ELF_PAGEALIGN(elf_brk);
++              size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
++              flags = MAP_FIXED | MAP_PRIVATE;
++              vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
++
++              down_write(&current->mm->mmap_sem);
++              start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
++              retval = -ENOMEM;
++              if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
++//                    if (current->personality & ADDR_NO_RANDOMIZE)
++//                            vm_flags |= VM_READ | VM_MAYREAD;
++                      start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
++                      retval = IS_ERR_VALUE(start) ? start : 0;
++              }
++              up_write(&current->mm->mmap_sem);
++              if (retval == 0)
++                      retval = set_brk(start + size, start + size + PAGE_SIZE);
++              if (retval < 0)
++                      goto out_free_dentry;
++      }
++#endif
++
+       if (elf_interpreter) {
+-              unsigned long interp_map_addr = 0;
+-
+               elf_entry = load_elf_interp(&loc->interp_elf_ex,
+                                           interpreter,
+-                                          &interp_map_addr,
+                                           load_bias, interp_elf_phdata);
+               if (!IS_ERR((void *)elf_entry)) {
+                       /*
+@@ -1056,6 +1530,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       current->mm->end_data = end_data;
+       current->mm->start_stack = bprm->p;
++#ifndef CONFIG_PAX_RANDMMAP
+       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
+               current->mm->brk = current->mm->start_brk =
+                       arch_randomize_brk(current->mm);
+@@ -1063,6 +1538,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+               current->brk_randomized = 1;
+ #endif
+       }
++#endif
+       if (current->personality & MMAP_PAGE_ZERO) {
+               /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
+@@ -1234,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+  * Decide what to dump of a segment, part, all or none.
+  */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+-                                 unsigned long mm_flags)
++                                 unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type)  (mm_flags & (1UL << MMF_DUMP_##type))
+@@ -1281,7 +1757,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+       if (vma->vm_file == NULL)
+               return 0;
+-      if (FILTER(MAPPED_PRIVATE))
++      if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+               goto whole;
+       /*
+@@ -1381,7 +1857,7 @@ static void fill_elf_header(struct elfhdr *elf, int segs,
+       return;
+ }
+-static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
++static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, loff_t offset)
+ {
+       phdr->p_type = PT_NOTE;
+       phdr->p_offset = offset;
+@@ -1488,9 +1964,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+ {
+       elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+       int i = 0;
+-      do
++      do {
+               i += 2;
+-      while (auxv[i - 2] != AT_NULL);
++      } while (auxv[i - 2] != AT_NULL);
+       fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+@@ -1499,7 +1975,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+ {
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
++      copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
+       set_fs(old_fs);
+       fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+ }
+@@ -2219,7 +2695,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+                       vma = next_vma(vma, gate_vma)) {
+               unsigned long dump_size;
+-              dump_size = vma_dump_size(vma, cprm->mm_flags);
++              dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
+               vma_filesz[i++] = dump_size;
+               vma_data_size += dump_size;
+       }
+@@ -2327,6 +2803,167 @@ out:
+ #endif                /* CONFIG_ELF_CORE */
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++      struct elfhdr elf_h;
++      struct elf_phdr elf_p;
++      unsigned long i;
++      unsigned long oldflags;
++      bool is_textrel_rw, is_textrel_rx, is_relro;
++
++      if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
++              return;
++
++      oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++      newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_ELFRELOCS
++      /* possible TEXTREL */
++      is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++      is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#else
++      is_textrel_rw = false;
++      is_textrel_rx = false;
++#endif
++
++      /* possible RELRO */
++      is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++      if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++              return;
++
++      if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++          memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++          ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++          ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++          (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++          !elf_check_arch(&elf_h) ||
++          elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++          elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++              return;
++
++      for (i = 0UL; i < elf_h.e_phnum; i++) {
++              if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++                      return;
++              switch (elf_p.p_type) {
++              case PT_DYNAMIC:
++                      if (!is_textrel_rw && !is_textrel_rx)
++                              continue;
++                      i = 0UL;
++                      while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++                              elf_dyn dyn;
++
++                              if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++                                      break;
++                              if (dyn.d_tag == DT_NULL)
++                                      break;
++                              if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++                                      gr_log_textrel(vma, is_textrel_rw);
++                                      if (is_textrel_rw)
++                                              vma->vm_flags |= VM_MAYWRITE;
++                                      else
++                                              /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
++                                              vma->vm_flags &= ~VM_MAYWRITE;
++                                      break;
++                              }
++                              i++;
++                      }
++                      is_textrel_rw = false;
++                      is_textrel_rx = false;
++                      continue;
++
++              case PT_GNU_RELRO:
++                      if (!is_relro)
++                              continue;
++                      if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
++                              vma->vm_flags &= ~VM_MAYWRITE;
++                      is_relro = false;
++                      continue;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++              case PT_PAX_FLAGS: {
++                      const char *msg_mprotect = "", *msg_emutramp = "";
++                      char *buffer_lib, *buffer_exe;
++
++                      if (elf_p.p_flags & PF_NOMPROTECT)
++                              msg_mprotect = "MPROTECT disabled";
++
++#ifdef CONFIG_PAX_EMUTRAMP
++                      if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
++                              msg_emutramp = "EMUTRAMP enabled";
++#endif
++
++                      if (!msg_mprotect[0] && !msg_emutramp[0])
++                              continue;
++
++                      if (!printk_ratelimit())
++                              continue;
++
++                      buffer_lib = (char *)__get_free_page(GFP_KERNEL);
++                      buffer_exe = (char *)__get_free_page(GFP_KERNEL);
++                      if (buffer_lib && buffer_exe) {
++                              char *path_lib, *path_exe;
++
++                              path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
++                              path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
++
++                              pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
++                                      (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
++
++                      }
++                      free_page((unsigned long)buffer_exe);
++                      free_page((unsigned long)buffer_lib);
++                      continue;
++              }
++#endif
++
++              }
++      }
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++
++extern int grsec_enable_log_rwxmaps;
++
++static void elf_handle_mmap(struct file *file)
++{
++      struct elfhdr elf_h;
++      struct elf_phdr elf_p;
++      unsigned long i;
++
++      if (!grsec_enable_log_rwxmaps)
++              return;
++
++      if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++          memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++          (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
++          elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++          elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++              return;
++
++      for (i = 0UL; i < elf_h.e_phnum; i++) {
++              if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++                      return;
++              if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
++                      gr_log_ptgnustack(file);
++      }
++}
++#endif
++
+ static int __init init_elf_binfmt(void)
+ {
+       register_binfmt(&elf_format);
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 464a972..c889ed6 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -1302,7 +1302,7 @@ static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
+       return;
+ }
+-static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
++static inline void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, loff_t offset)
+ {
+       phdr->p_type = PT_NOTE;
+       phdr->p_offset = offset;
+@@ -1673,7 +1673,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
+       /* Write notes phdr entry */
+       {
+-              int sz = 0;
++              size_t sz = 0;
+               for (i = 0; i < numnote; i++)
+                       sz += notesize(notes + i);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 08ae993..9ef2014 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -840,7 +840,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+       else if (bdev->bd_contains == bdev)
+               return true;     /* is a whole device which isn't held */
+-      else if (whole->bd_holder == bd_may_claim)
++      else if (whole->bd_holder == (void *)bd_may_claim)
+               return true;     /* is a partition of a device that is being partitioned */
+       else if (whole->bd_holder != NULL)
+               return false;    /* is a partition of a held device */
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index d1c56c9..07bda1f 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -358,7 +358,7 @@ static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
+  */
+ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
+ {
+-      return atomic64_inc_return(&fs_info->tree_mod_seq);
++      return atomic64_inc_return_unchecked(&fs_info->tree_mod_seq);
+ }
+ /*
+@@ -1182,9 +1182,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+               free_extent_buffer(buf);
+               add_root_to_dirty_list(root);
+       } else {
+-              if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+-                      parent_start = parent->start;
+-              else
++              if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
++                      if (parent)
++                              parent_start = parent->start;
++                      else
++                              parent_start = 0;
++              } else
+                       parent_start = 0;
+               WARN_ON(trans->transid != btrfs_header_generation(parent));
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 791e47c..da50e2c 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -345,8 +345,8 @@ struct btrfs_dev_replace {
+       u64 replace_state;      /* see #define above */
+       u64 time_started;       /* seconds since 1-Jan-1970 */
+       u64 time_stopped;       /* seconds since 1-Jan-1970 */
+-      atomic64_t num_write_errors;
+-      atomic64_t num_uncorrectable_read_errors;
++      atomic64_unchecked_t num_write_errors;
++      atomic64_unchecked_t num_uncorrectable_read_errors;
+       u64 cursor_left;
+       u64 committed_cursor_left;
+@@ -836,7 +836,7 @@ struct btrfs_fs_info {
+       /* this protects tree_mod_seq_list */
+       spinlock_t tree_mod_seq_lock;
+-      atomic64_t tree_mod_seq;
++      atomic64_unchecked_t tree_mod_seq;
+       struct list_head tree_mod_seq_list;
+       /* this protects tree_mod_log */
+@@ -1148,7 +1148,7 @@ struct btrfs_root {
+       struct list_head log_ctxs[2];
+       atomic_t log_writers;
+       atomic_t log_commit[2];
+-      atomic_t log_batch;
++      atomic_unchecked_t log_batch;
+       int log_transid;
+       /* No matter the commit succeeds or not*/
+       int log_transid_committed;
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 3eeb9cd..428a561 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -456,7 +456,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
+ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
+ {
+-      int seq = atomic_inc_return(&delayed_root->items_seq);
++      int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
+       /*
+        * atomic_dec_return implies a barrier for waitqueue_active
+@@ -1397,7 +1397,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
+ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
+ {
+-      int val = atomic_read(&delayed_root->items_seq);
++      int val = atomic_read_unchecked(&delayed_root->items_seq);
+       if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
+               return 1;
+@@ -1422,7 +1422,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
+               int seq;
+               int ret;
+-              seq = atomic_read(&delayed_root->items_seq);
++              seq = atomic_read_unchecked(&delayed_root->items_seq);
+               ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
+               if (ret)
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index 2495b3d..8bdbb07 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
+        */
+       struct list_head prepare_list;
+       atomic_t items;         /* for delayed items */
+-      atomic_t items_seq;     /* for delayed items */
++      atomic_unchecked_t items_seq;   /* for delayed items */
+       int nodes;              /* for delayed nodes */
+       wait_queue_head_t wait;
+ };
+@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
+                               struct btrfs_delayed_root *delayed_root)
+ {
+       atomic_set(&delayed_root->items, 0);
+-      atomic_set(&delayed_root->items_seq, 0);
++      atomic_set_unchecked(&delayed_root->items_seq, 0);
+       delayed_root->nodes = 0;
+       spin_lock_init(&delayed_root->lock);
+       init_waitqueue_head(&delayed_root->wait);
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index ac02e04..c0b234e 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -658,7 +658,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+               action = BTRFS_ADD_DELAYED_REF;
+       if (is_fstree(ref_root))
+-              seq = atomic64_read(&fs_info->tree_mod_seq);
++              seq = atomic64_read_unchecked(&fs_info->tree_mod_seq);
+       delayed_refs = &trans->transaction->delayed_refs;
+       /* first set the basic ref node struct up */
+@@ -714,7 +714,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+       delayed_refs = &trans->transaction->delayed_refs;
+       if (is_fstree(ref_root))
+-              seq = atomic64_read(&fs_info->tree_mod_seq);
++              seq = atomic64_read_unchecked(&fs_info->tree_mod_seq);
+       /* first set the basic ref node struct up */
+       atomic_set(&ref->refs, 1);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index e9bbff3..6985e2c 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -82,8 +82,8 @@ no_valid_dev_replace_entry_found:
+               dev_replace->replace_state = 0;
+               dev_replace->time_started = 0;
+               dev_replace->time_stopped = 0;
+-              atomic64_set(&dev_replace->num_write_errors, 0);
+-              atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
++              atomic64_set_unchecked(&dev_replace->num_write_errors, 0);
++              atomic64_set_unchecked(&dev_replace->num_uncorrectable_read_errors, 0);
+               dev_replace->cursor_left = 0;
+               dev_replace->committed_cursor_left = 0;
+               dev_replace->cursor_left_last_write_of_item = 0;
+@@ -112,9 +112,9 @@ no_valid_dev_replace_entry_found:
+       dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
+       dev_replace->time_stopped =
+               btrfs_dev_replace_time_stopped(eb, ptr);
+-      atomic64_set(&dev_replace->num_write_errors,
++      atomic64_set_unchecked(&dev_replace->num_write_errors,
+                    btrfs_dev_replace_num_write_errors(eb, ptr));
+-      atomic64_set(&dev_replace->num_uncorrectable_read_errors,
++      atomic64_set_unchecked(&dev_replace->num_uncorrectable_read_errors,
+                    btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
+       dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
+       dev_replace->committed_cursor_left = dev_replace->cursor_left;
+@@ -274,9 +274,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
+       btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
+       btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
+       btrfs_set_dev_replace_num_write_errors(eb, ptr,
+-              atomic64_read(&dev_replace->num_write_errors));
++              atomic64_read_unchecked(&dev_replace->num_write_errors));
+       btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
+-              atomic64_read(&dev_replace->num_uncorrectable_read_errors));
++              atomic64_read_unchecked(&dev_replace->num_uncorrectable_read_errors));
+       dev_replace->cursor_left_last_write_of_item =
+               dev_replace->cursor_left;
+       btrfs_set_dev_replace_cursor_left(eb, ptr,
+@@ -377,8 +377,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+       dev_replace->cursor_right = 0;
+       dev_replace->is_valid = 1;
+       dev_replace->item_needs_writeback = 1;
+-      atomic64_set(&dev_replace->num_write_errors, 0);
+-      atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
++      atomic64_set_unchecked(&dev_replace->num_write_errors, 0);
++      atomic64_set_unchecked(&dev_replace->num_uncorrectable_read_errors, 0);
+       btrfs_dev_replace_unlock(dev_replace, 1);
+       ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
+@@ -648,9 +648,9 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
+       args->status.time_started = dev_replace->time_started;
+       args->status.time_stopped = dev_replace->time_stopped;
+       args->status.num_write_errors =
+-              atomic64_read(&dev_replace->num_write_errors);
++              atomic64_read_unchecked(&dev_replace->num_write_errors);
+       args->status.num_uncorrectable_read_errors =
+-              atomic64_read(&dev_replace->num_uncorrectable_read_errors);
++              atomic64_read_unchecked(&dev_replace->num_uncorrectable_read_errors);
+       switch (dev_replace->replace_state) {
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+       case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
+index e922b42..2a5a145 100644
+--- a/fs/btrfs/dev-replace.h
++++ b/fs/btrfs/dev-replace.h
+@@ -42,8 +42,8 @@ void btrfs_dev_replace_set_lock_blocking(struct btrfs_dev_replace *dev_replace);
+ void btrfs_dev_replace_clear_lock_blocking(
+                                       struct btrfs_dev_replace *dev_replace);
+-static inline void btrfs_dev_replace_stats_inc(atomic64_t *stat_value)
++static inline void btrfs_dev_replace_stats_inc(atomic64_unchecked_t *stat_value)
+ {
+-      atomic64_inc(stat_value);
++      atomic64_inc_unchecked(stat_value);
+ }
+ #endif
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3dede6d..6731015 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1311,7 +1311,7 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
+       atomic_set(&root->log_commit[0], 0);
+       atomic_set(&root->log_commit[1], 0);
+       atomic_set(&root->log_writers, 0);
+-      atomic_set(&root->log_batch, 0);
++      atomic_set_unchecked(&root->log_batch, 0);
+       atomic_set(&root->orphan_inodes, 0);
+       atomic_set(&root->refs, 1);
+       atomic_set(&root->will_be_snapshoted, 0);
+@@ -2662,7 +2662,7 @@ int open_ctree(struct super_block *sb,
+       atomic_set(&fs_info->defrag_running, 0);
+       atomic_set(&fs_info->qgroup_op_seq, 0);
+       atomic_set(&fs_info->reada_works_cnt, 0);
+-      atomic64_set(&fs_info->tree_mod_seq, 0);
++      atomic64_set_unchecked(&fs_info->tree_mod_seq, 0);
+       fs_info->fs_frozen = 0;
+       fs_info->sb = sb;
+       fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 26f9ac7..bc78edc 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -235,7 +235,9 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
+                       em->start = merge->start;
+                       em->orig_start = merge->orig_start;
+                       em->len += merge->len;
+-                      em->block_len += merge->block_len;
++                      if (em->block_start != EXTENT_MAP_HOLE &&
++                          em->block_start != EXTENT_MAP_INLINE)
++                              em->block_len += merge->block_len;
+                       em->block_start = merge->block_start;
+                       em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
+                       em->mod_start = merge->mod_start;
+@@ -252,7 +254,9 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
+               merge = rb_entry(rb, struct extent_map, rb_node);
+       if (rb && mergable_maps(em, merge)) {
+               em->len += merge->len;
+-              em->block_len += merge->block_len;
++              if (em->block_start != EXTENT_MAP_HOLE &&
++                  em->block_start != EXTENT_MAP_INLINE)
++                      em->block_len += merge->block_len;
+               rb_erase(&merge->rb_node, &tree->map);
+               RB_CLEAR_NODE(&merge->rb_node);
+               em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index fea31a4..8be7c86 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1935,7 +1935,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+               return ret;
+       inode_lock(inode);
+-      atomic_inc(&root->log_batch);
++      atomic_inc_unchecked(&root->log_batch);
+       full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                            &BTRFS_I(inode)->runtime_flags);
+       /*
+@@ -1989,7 +1989,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+               inode_unlock(inode);
+               goto out;
+       }
+-      atomic_inc(&root->log_batch);
++      atomic_inc_unchecked(&root->log_batch);
+       /*
+        * If the last transaction that changed this file was before the current
+diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
+index 3af651c..30b9644 100644
+--- a/fs/btrfs/free-space-cache.h
++++ b/fs/btrfs/free-space-cache.h
+@@ -48,6 +48,7 @@ struct btrfs_free_space_op {
+       bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
+                          struct btrfs_free_space *info);
+ };
++typedef struct btrfs_free_space_op __no_const btrfs_free_space_op_no_const;
+ struct btrfs_io_ctl;
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index cd8d302..dfd9e57 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -153,7 +153,7 @@ struct btrfs_raid_bio {
+       atomic_t stripes_pending;
+-      atomic_t error;
++      atomic_unchecked_t error;
+       /*
+        * these are two arrays of pointers.  We allocate the
+        * rbio big enough to hold them both and setup their
+@@ -916,7 +916,7 @@ static void raid_write_end_io(struct bio *bio)
+       /* OK, we have read all the stripes we need to. */
+       max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
+                    0 : rbio->bbio->max_errors;
+-      if (atomic_read(&rbio->error) > max_errors)
++      if (atomic_read_unchecked(&rbio->error) > max_errors)
+               err = -EIO;
+       rbio_orig_end_io(rbio, err);
+@@ -999,7 +999,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
+       rbio->faila = -1;
+       rbio->failb = -1;
+       atomic_set(&rbio->refs, 1);
+-      atomic_set(&rbio->error, 0);
++      atomic_set_unchecked(&rbio->error, 0);
+       atomic_set(&rbio->stripes_pending, 0);
+       /*
+@@ -1208,7 +1208,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
+       set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+       spin_unlock_irq(&rbio->bio_list_lock);
+-      atomic_set(&rbio->error, 0);
++      atomic_set_unchecked(&rbio->error, 0);
+       /*
+        * now that we've set rmw_locked, run through the
+@@ -1398,11 +1398,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
+       if (rbio->faila == -1) {
+               /* first failure on this rbio */
+               rbio->faila = failed;
+-              atomic_inc(&rbio->error);
++              atomic_inc_unchecked(&rbio->error);
+       } else if (rbio->failb == -1) {
+               /* second failure on this rbio */
+               rbio->failb = failed;
+-              atomic_inc(&rbio->error);
++              atomic_inc_unchecked(&rbio->error);
+       } else {
+               ret = -EIO;
+       }
+@@ -1464,7 +1464,7 @@ static void raid_rmw_end_io(struct bio *bio)
+       if (!atomic_dec_and_test(&rbio->stripes_pending))
+               return;
+-      if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++      if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+               goto cleanup;
+       /*
+@@ -1519,7 +1519,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
+       index_rbio_pages(rbio);
+-      atomic_set(&rbio->error, 0);
++      atomic_set_unchecked(&rbio->error, 0);
+       /*
+        * build a list of bios to read all the missing parts of this
+        * stripe
+@@ -2012,7 +2012,7 @@ static void raid_recover_end_io(struct bio *bio)
+       if (!atomic_dec_and_test(&rbio->stripes_pending))
+               return;
+-      if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++      if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+               rbio_orig_end_io(rbio, -EIO);
+       else
+               __raid_recover_end_io(rbio);
+@@ -2041,7 +2041,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+       if (ret)
+               goto cleanup;
+-      atomic_set(&rbio->error, 0);
++      atomic_set_unchecked(&rbio->error, 0);
+       /*
+        * read everything that hasn't failed.  Thanks to the
+@@ -2050,7 +2050,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+        */
+       for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+               if (rbio->faila == stripe || rbio->failb == stripe) {
+-                      atomic_inc(&rbio->error);
++                      atomic_inc_unchecked(&rbio->error);
+                       continue;
+               }
+@@ -2080,7 +2080,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+                * were up to date, or we might have no bios to read because
+                * the devices were gone.
+                */
+-              if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
++              if (atomic_read_unchecked(&rbio->error) <= rbio->bbio->max_errors) {
+                       __raid_recover_end_io(rbio);
+                       goto out;
+               } else {
+@@ -2342,7 +2342,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+               SetPageUptodate(q_page);
+       }
+-      atomic_set(&rbio->error, 0);
++      atomic_set_unchecked(&rbio->error, 0);
+       for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+               struct page *p;
+@@ -2463,7 +2463,7 @@ static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
+  */
+ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
+ {
+-      if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++      if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+               goto cleanup;
+       if (rbio->faila >= 0 || rbio->failb >= 0) {
+@@ -2560,7 +2560,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
+       bio_list_init(&bio_list);
+-      atomic_set(&rbio->error, 0);
++      atomic_set_unchecked(&rbio->error, 0);
+       /*
+        * build a list of bios to read all the missing parts of this
+        * stripe
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 1d195d2..9d9cfa9 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3720,7 +3720,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+               if (ret)
+                       break;
+               if (is_dev_replace &&
+-                  atomic64_read(&dev_replace->num_write_errors) > 0) {
++                  atomic64_read_unchecked(&dev_replace->num_write_errors) > 0) {
+                       ret = -EIO;
+                       break;
+               }
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 4071fe2..caa5e0e 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -268,7 +268,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+                          function, line, errstr);
+               return;
+       }
+-      ACCESS_ONCE(trans->transaction->aborted) = errno;
++      ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
+       /* Wake up anybody who may be waiting on this transaction */
+       wake_up(&fs_info->transaction_wait);
+       wake_up(&fs_info->transaction_blocked_wait);
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index c656990..e86b7cc 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -497,7 +497,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
+       for (set = 0; set < FEAT_MAX; set++) {
+               int i;
+               struct attribute *attrs[2];
+-              struct attribute_group agroup = {
++              attribute_group_no_const agroup = {
+                       .name = "features",
+                       .attrs = attrs,
+               };
+diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
+index bf62ad9..9bb3ee8 100644
+--- a/fs/btrfs/tests/btrfs-tests.c
++++ b/fs/btrfs/tests/btrfs-tests.c
+@@ -119,7 +119,7 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
+       fs_info->running_transaction = NULL;
+       fs_info->qgroup_tree = RB_ROOT;
+       fs_info->qgroup_ulist = NULL;
+-      atomic64_set(&fs_info->tree_mod_seq, 0);
++      atomic64_set_unchecked(&fs_info->tree_mod_seq, 0);
+       INIT_LIST_HEAD(&fs_info->dirty_qgroups);
+       INIT_LIST_HEAD(&fs_info->dead_roots);
+       INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
+diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
+index 3221c8d..8fe6170 100644
+--- a/fs/btrfs/tests/free-space-tests.c
++++ b/fs/btrfs/tests/free-space-tests.c
+@@ -409,7 +409,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+       int ret;
+       u64 offset;
+       u64 max_extent_size;
+-      const struct btrfs_free_space_op test_free_space_ops = {
++      const btrfs_free_space_op_no_const test_free_space_ops = {
+               .recalc_thresholds = cache->free_space_ctl->op->recalc_thresholds,
+               .use_bitmap = test_use_bitmap,
+       };
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 95d4191..f804459 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -277,7 +277,7 @@ loop:
+       if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
+               WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
+                       "creating a fresh transaction\n");
+-      atomic64_set(&fs_info->tree_mod_seq, 0);
++      atomic64_set_unchecked(&fs_info->tree_mod_seq, 0);
+       spin_lock_init(&cur_trans->delayed_refs.lock);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index ef9c55b..fcd9451 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -174,7 +174,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
+               root->log_start_pid = current->pid;
+       }
+-      atomic_inc(&root->log_batch);
++      atomic_inc_unchecked(&root->log_batch);
+       atomic_inc(&root->log_writers);
+       if (ctx) {
+               int index = root->log_transid % 2;
+@@ -2771,7 +2771,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+               wait_log_commit(root, log_transid - 1);
+       while (1) {
+-              int batch = atomic_read(&root->log_batch);
++              int batch = atomic_read_unchecked(&root->log_batch);
+               /* when we're on an ssd, just kick the log commit out */
+               if (!btrfs_test_opt(root->fs_info, SSD) &&
+                   test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
+@@ -2780,7 +2780,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+                       mutex_lock(&root->log_mutex);
+               }
+               wait_for_writer(root);
+-              if (batch == atomic_read(&root->log_batch))
++              if (batch == atomic_read_unchecked(&root->log_batch))
+                       break;
+       }
+@@ -2826,7 +2826,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+       btrfs_init_log_ctx(&root_log_ctx, NULL);
+       mutex_lock(&log_root_tree->log_mutex);
+-      atomic_inc(&log_root_tree->log_batch);
++      atomic_inc_unchecked(&log_root_tree->log_batch);
+       atomic_inc(&log_root_tree->log_writers);
+       index2 = log_root_tree->log_transid % 2;
+diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
+index ab858e3..96fd5a1 100644
+--- a/fs/btrfs/tree-log.h
++++ b/fs/btrfs/tree-log.h
+@@ -48,7 +48,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
+ static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
+                                            struct btrfs_trans_handle *trans)
+ {
+-      ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
++      ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
+ }
+ static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 035efce..f7fd1a6 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -246,7 +246,7 @@ static struct btrfs_device *__alloc_device(void)
+       spin_lock_init(&dev->reada_lock);
+       atomic_set(&dev->reada_in_flight, 0);
+-      atomic_set(&dev->dev_stats_ccnt, 0);
++      atomic_set_unchecked(&dev->dev_stats_ccnt, 0);
+       btrfs_device_data_ordered_init(dev);
+       INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+       INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+@@ -5309,7 +5309,7 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
+               sizeof(u64) * (total_stripes),
+               GFP_NOFS|__GFP_NOFAIL);
+-      atomic_set(&bbio->error, 0);
++      atomic_set_unchecked(&bbio->error, 0);
+       atomic_set(&bbio->refs, 1);
+       return bbio;
+@@ -6008,7 +6008,7 @@ static void btrfs_end_bio(struct bio *bio)
+       int is_orig_bio = 0;
+       if (bio->bi_error) {
+-              atomic_inc(&bbio->error);
++              atomic_inc_unchecked(&bbio->error);
+               if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
+                       unsigned int stripe_index =
+                               btrfs_io_bio(bio)->stripe_index;
+@@ -6046,7 +6046,7 @@ static void btrfs_end_bio(struct bio *bio)
+               /* only send an error to the higher layers if it is
+                * beyond the tolerance of the btrfs bio
+                */
+-              if (atomic_read(&bbio->error) > bbio->max_errors) {
++              if (atomic_read_unchecked(&bbio->error) > bbio->max_errors) {
+                       bio->bi_error = -EIO;
+               } else {
+                       /*
+@@ -6156,7 +6156,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
+ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
+ {
+-      atomic_inc(&bbio->error);
++      atomic_inc_unchecked(&bbio->error);
+       if (atomic_dec_and_test(&bbio->stripes_pending)) {
+               /* Should be the original bio. */
+               WARN_ON(bio != bbio->orig_bio);
+@@ -7033,10 +7033,10 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+               if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
+                       continue;
+-              stats_cnt = atomic_read(&device->dev_stats_ccnt);
++              stats_cnt = atomic_read_unchecked(&device->dev_stats_ccnt);
+               ret = update_dev_stat_item(trans, dev_root, device);
+               if (!ret)
+-                      atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++                      atomic_sub_unchecked(stats_cnt, &device->dev_stats_ccnt);
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 6613e63..688bc8e 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -148,8 +148,8 @@ struct btrfs_device {
+       int dev_stats_valid;
+       /* Counter to record the change of device stats */
+-      atomic_t dev_stats_ccnt;
+-      atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
++      atomic_unchecked_t dev_stats_ccnt;
++      atomic_unchecked_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
+ };
+ /*
+@@ -307,7 +307,7 @@ struct btrfs_bio {
+       struct bio *orig_bio;
+       unsigned long flags;
+       void *private;
+-      atomic_t error;
++      atomic_unchecked_t error;
+       int max_errors;
+       int num_stripes;
+       int mirror_num;
+@@ -466,21 +466,21 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
+ static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
+ {
+-      return atomic_read(&dev->dev_stats_ccnt);
++      return atomic_read_unchecked(&dev->dev_stats_ccnt);
+ }
+ static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+                                     int index)
+ {
+-      atomic_inc(dev->dev_stat_values + index);
++      atomic_inc_unchecked(dev->dev_stat_values + index);
+       smp_mb__before_atomic();
+-      atomic_inc(&dev->dev_stats_ccnt);
++      atomic_inc_unchecked(&dev->dev_stats_ccnt);
+ }
+ static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
+                                     int index)
+ {
+-      return atomic_read(dev->dev_stat_values + index);
++      return atomic_read_unchecked(dev->dev_stat_values + index);
+ }
+ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+@@ -488,18 +488,18 @@ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+ {
+       int ret;
+-      ret = atomic_xchg(dev->dev_stat_values + index, 0);
++      ret = atomic_xchg_unchecked(dev->dev_stat_values + index, 0);
+       smp_mb__before_atomic();
+-      atomic_inc(&dev->dev_stats_ccnt);
++      atomic_inc_unchecked(&dev->dev_stats_ccnt);
+       return ret;
+ }
+ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+                                     int index, unsigned long val)
+ {
+-      atomic_set(dev->dev_stat_values + index, val);
++      atomic_set_unchecked(dev->dev_stat_values + index, val);
+       smp_mb__before_atomic();
+-      atomic_inc(&dev->dev_stats_ccnt);
++      atomic_inc_unchecked(&dev->dev_stats_ccnt);
+ }
+ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 9c8eb9b..236a1ca 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -3476,7 +3476,7 @@ void __init buffer_init(void)
+       bh_cachep = kmem_cache_create("buffer_head",
+                       sizeof(struct buffer_head), 0,
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+-                              SLAB_MEM_SPREAD),
++                              SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
+                               NULL);
+       /*
+diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
+index 6af790f..ec4c1e6 100644
+--- a/fs/cachefiles/bind.c
++++ b/fs/cachefiles/bind.c
+@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
+              args);
+       /* start by checking things over */
+-      ASSERT(cache->fstop_percent >= 0 &&
+-             cache->fstop_percent < cache->fcull_percent &&
++      ASSERT(cache->fstop_percent < cache->fcull_percent &&
+              cache->fcull_percent < cache->frun_percent &&
+              cache->frun_percent  < 100);
+-      ASSERT(cache->bstop_percent >= 0 &&
+-             cache->bstop_percent < cache->bcull_percent &&
++      ASSERT(cache->bstop_percent < cache->bcull_percent &&
+              cache->bcull_percent < cache->brun_percent &&
+              cache->brun_percent  < 100);
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 1ee54ff..ba89748 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -176,8 +176,8 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
+       cachefiles_has_space(cache, 0, 0);
+       /* summarise */
+-      f_released = atomic_xchg(&cache->f_released, 0);
+-      b_released = atomic_long_xchg(&cache->b_released, 0);
++      f_released = atomic_xchg_unchecked(&cache->f_released, 0);
++      b_released = atomic_long_xchg_unchecked(&cache->b_released, 0);
+       clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
+       n = snprintf(buffer, sizeof(buffer),
+@@ -203,7 +203,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
+       if (n > buflen)
+               return -EMSGSIZE;
+-      if (copy_to_user(_buffer, buffer, n) != 0)
++      if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
+               return -EFAULT;
+       return n;
+@@ -229,7 +229,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
+       if (test_bit(CACHEFILES_DEAD, &cache->flags))
+               return -EIO;
+-      if (datalen < 0 || datalen > PAGE_SIZE - 1)
++      if (datalen > PAGE_SIZE - 1)
+               return -EOPNOTSUPP;
+       /* drag the command string into the kernel so we can parse it */
+@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
+       if (args[0] != '%' || args[1] != '\0')
+               return -EINVAL;
+-      if (fstop < 0 || fstop >= cache->fcull_percent)
++      if (fstop >= cache->fcull_percent)
+               return cachefiles_daemon_range_error(cache, args);
+       cache->fstop_percent = fstop;
+@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
+       if (args[0] != '%' || args[1] != '\0')
+               return -EINVAL;
+-      if (bstop < 0 || bstop >= cache->bcull_percent)
++      if (bstop >= cache->bcull_percent)
+               return cachefiles_daemon_range_error(cache, args);
+       cache->bstop_percent = bstop;
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index cd1effe..73f8767 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -65,9 +65,9 @@ struct cachefiles_cache {
+       wait_queue_head_t               daemon_pollwq;  /* poll waitqueue for daemon */
+       struct rb_root                  active_nodes;   /* active nodes (can't be culled) */
+       rwlock_t                        active_lock;    /* lock for active_nodes */
+-      atomic_t                        gravecounter;   /* graveyard uniquifier */
+-      atomic_t                        f_released;     /* number of objects released lately */
+-      atomic_long_t                   b_released;     /* number of blocks released lately */
++      atomic_unchecked_t              gravecounter;   /* graveyard uniquifier */
++      atomic_unchecked_t              f_released;     /* number of objects released lately */
++      atomic_long_unchecked_t         b_released;     /* number of blocks released lately */
+       unsigned                        frun_percent;   /* when to stop culling (% files) */
+       unsigned                        fcull_percent;  /* when to start culling (% files) */
+       unsigned                        fstop_percent;  /* when to stop allocating (% files) */
+@@ -182,19 +182,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
+  * proc.c
+  */
+ #ifdef CONFIG_CACHEFILES_HISTOGRAM
+-extern atomic_t cachefiles_lookup_histogram[HZ];
+-extern atomic_t cachefiles_mkdir_histogram[HZ];
+-extern atomic_t cachefiles_create_histogram[HZ];
++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++extern atomic_unchecked_t cachefiles_create_histogram[HZ];
+ extern int __init cachefiles_proc_init(void);
+ extern void cachefiles_proc_cleanup(void);
+ static inline
+-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
+ {
+       unsigned long jif = jiffies - start_jif;
+       if (jif >= HZ)
+               jif = HZ - 1;
+-      atomic_inc(&histogram[jif]);
++      atomic_inc_unchecked(&histogram[jif]);
+ }
+ #else
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index c6ee4b5..de05717 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -274,8 +274,8 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
+       /* This object can now be culled, so we need to let the daemon know
+        * that there is something it can remove if it needs to.
+        */
+-      atomic_long_add(i_blocks, &cache->b_released);
+-      if (atomic_inc_return(&cache->f_released))
++      atomic_long_add_unchecked(i_blocks, &cache->b_released);
++      if (atomic_inc_return_unchecked(&cache->f_released))
+               cachefiles_state_changed(cache);
+ }
+@@ -334,7 +334,7 @@ try_again:
+       /* first step is to make up a grave dentry in the graveyard */
+       sprintf(nbuffer, "%08x%08x",
+               (uint32_t) get_seconds(),
+-              (uint32_t) atomic_inc_return(&cache->gravecounter));
++              (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
+       /* do the multiway lock magic */
+       trap = lock_rename(cache->graveyard, dir);
+diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
+index 125b90f..8c7bed5 100644
+--- a/fs/cachefiles/proc.c
++++ b/fs/cachefiles/proc.c
+@@ -14,9 +14,9 @@
+ #include <linux/seq_file.h>
+ #include "internal.h"
+-atomic_t cachefiles_lookup_histogram[HZ];
+-atomic_t cachefiles_mkdir_histogram[HZ];
+-atomic_t cachefiles_create_histogram[HZ];
++atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++atomic_unchecked_t cachefiles_create_histogram[HZ];
+ /*
+  * display the latency histogram
+@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
+               return 0;
+       default:
+               index = (unsigned long) v - 3;
+-              x = atomic_read(&cachefiles_lookup_histogram[index]);
+-              y = atomic_read(&cachefiles_mkdir_histogram[index]);
+-              z = atomic_read(&cachefiles_create_histogram[index]);
++              x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
++              y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
++              z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
+               if (x == 0 && y == 0 && z == 0)
+                       return 0;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index e247f6f..7c4ed52 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -933,7 +933,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
+ /*
+  * construct our own bdi so we can control readahead, etc.
+  */
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+ static int ceph_register_bdi(struct super_block *sb,
+                            struct ceph_fs_client *fsc)
+@@ -950,7 +950,7 @@ static int ceph_register_bdi(struct super_block *sb,
+                       VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
+       err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
+-                         atomic_long_inc_return(&bdi_seq));
++                         atomic_long_inc_return_unchecked(&bdi_seq));
+       if (!err)
+               sb->s_bdi = &fsc->backing_dev_info;
+       return err;
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index 6edd825..e8cbd2d 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -70,7 +70,7 @@ void chrdev_show(struct seq_file *f, off_t offset)
+  *
+  * Returns a -ve errno on failure.
+  */
+-static struct char_device_struct *
++static __nocapture(4) struct char_device_struct *
+ __register_chrdev_region(unsigned int major, unsigned int baseminor,
+                          int minorct, const char *name)
+ {
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 3d03e48..0f22463 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+       rc = kstrtobool_from_user(buffer, count, &bv);
+       if (rc == 0) {
+ #ifdef CONFIG_CIFS_STATS2
+-              atomic_set(&totBufAllocCount, 0);
+-              atomic_set(&totSmBufAllocCount, 0);
++              atomic_set_unchecked(&totBufAllocCount, 0);
++              atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+               spin_lock(&cifs_tcp_ses_lock);
+               list_for_each(tmp1, &cifs_tcp_ses_list) {
+@@ -279,7 +279,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+                                       tcon = list_entry(tmp3,
+                                                         struct cifs_tcon,
+                                                         tcon_list);
+-                                      atomic_set(&tcon->num_smbs_sent, 0);
++                                      atomic_set_unchecked(&tcon->num_smbs_sent, 0);
+                                       if (server->ops->clear_stats)
+                                               server->ops->clear_stats(tcon);
+                               }
+@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+                       smBufAllocCount.counter, cifs_min_small);
+ #ifdef CONFIG_CIFS_STATS2
+       seq_printf(m, "Total Large %d Small %d Allocations\n",
+-                              atomic_read(&totBufAllocCount),
+-                              atomic_read(&totSmBufAllocCount));
++                              atomic_read_unchecked(&totBufAllocCount),
++                              atomic_read_unchecked(&totSmBufAllocCount));
+ #endif /* CONFIG_CIFS_STATS2 */
+       seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
+@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+                               if (tcon->need_reconnect)
+                                       seq_puts(m, "\tDISCONNECTED ");
+                               seq_printf(m, "\nSMBs: %d",
+-                                         atomic_read(&tcon->num_smbs_sent));
++                                         atomic_read_unchecked(&tcon->num_smbs_sent));
+                               if (server->ops->print_stats)
+                                       server->ops->print_stats(m, tcon);
+                       }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8c68d03..267f6dd 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1148,9 +1148,10 @@ cifs_init_request_bufs(void)
+       cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
+                CIFSMaxBufSize, CIFSMaxBufSize);
+ */
+-      cifs_req_cachep = kmem_cache_create("cifs_request",
++      cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
+                                           CIFSMaxBufSize + max_hdr_size, 0,
+-                                          SLAB_HWCACHE_ALIGN, NULL);
++                                          SLAB_HWCACHE_ALIGN, 0,
++                                          CIFSMaxBufSize + max_hdr_size, NULL);
+       if (cifs_req_cachep == NULL)
+               return -ENOMEM;
+@@ -1176,9 +1177,9 @@ cifs_init_request_bufs(void)
+       more SMBs to use small buffer alloc and is still much more
+       efficient to alloc 1 per page off the slab compared to 17K (5page)
+       alloc of large cifs buffers even when page debugging is on */
+-      cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
++      cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
+                       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+-                      NULL);
++                      0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
+       if (cifs_sm_req_cachep == NULL) {
+               mempool_destroy(cifs_req_poolp);
+               kmem_cache_destroy(cifs_req_cachep);
+@@ -1262,8 +1263,8 @@ init_cifs(void)
+       atomic_set(&bufAllocCount, 0);
+       atomic_set(&smBufAllocCount, 0);
+ #ifdef CONFIG_CIFS_STATS2
+-      atomic_set(&totBufAllocCount, 0);
+-      atomic_set(&totSmBufAllocCount, 0);
++      atomic_set_unchecked(&totBufAllocCount, 0);
++      atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+       atomic_set(&midCount, 0);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 65f78b7..3c8044f0 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -842,35 +842,35 @@ struct cifs_tcon {
+       __u16 Flags;            /* optional support bits */
+       enum statusEnum tidStatus;
+ #ifdef CONFIG_CIFS_STATS
+-      atomic_t num_smbs_sent;
++      atomic_unchecked_t num_smbs_sent;
+       union {
+               struct {
+-                      atomic_t num_writes;
+-                      atomic_t num_reads;
+-                      atomic_t num_flushes;
+-                      atomic_t num_oplock_brks;
+-                      atomic_t num_opens;
+-                      atomic_t num_closes;
+-                      atomic_t num_deletes;
+-                      atomic_t num_mkdirs;
+-                      atomic_t num_posixopens;
+-                      atomic_t num_posixmkdirs;
+-                      atomic_t num_rmdirs;
+-                      atomic_t num_renames;
+-                      atomic_t num_t2renames;
+-                      atomic_t num_ffirst;
+-                      atomic_t num_fnext;
+-                      atomic_t num_fclose;
+-                      atomic_t num_hardlinks;
+-                      atomic_t num_symlinks;
+-                      atomic_t num_locks;
+-                      atomic_t num_acl_get;
+-                      atomic_t num_acl_set;
++                      atomic_unchecked_t num_writes;
++                      atomic_unchecked_t num_reads;
++                      atomic_unchecked_t num_flushes;
++                      atomic_unchecked_t num_oplock_brks;
++                      atomic_unchecked_t num_opens;
++                      atomic_unchecked_t num_closes;
++                      atomic_unchecked_t num_deletes;
++                      atomic_unchecked_t num_mkdirs;
++                      atomic_unchecked_t num_posixopens;
++                      atomic_unchecked_t num_posixmkdirs;
++                      atomic_unchecked_t num_rmdirs;
++                      atomic_unchecked_t num_renames;
++                      atomic_unchecked_t num_t2renames;
++                      atomic_unchecked_t num_ffirst;
++                      atomic_unchecked_t num_fnext;
++                      atomic_unchecked_t num_fclose;
++                      atomic_unchecked_t num_hardlinks;
++                      atomic_unchecked_t num_symlinks;
++                      atomic_unchecked_t num_locks;
++                      atomic_unchecked_t num_acl_get;
++                      atomic_unchecked_t num_acl_set;
+               } cifs_stats;
+ #ifdef CONFIG_CIFS_SMB2
+               struct {
+-                      atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+-                      atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
++                      atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
++                      atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
+               } smb2_stats;
+ #endif /* CONFIG_CIFS_SMB2 */
+       } stats;
+@@ -1223,7 +1223,7 @@ convert_delimiter(char *path, char delim)
+ }
+ #ifdef CONFIG_CIFS_STATS
+-#define cifs_stats_inc atomic_inc
++#define cifs_stats_inc atomic_inc_unchecked
+ static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
+                                           unsigned int bytes)
+@@ -1586,8 +1586,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
+ /* Various Debug counters */
+ GLOBAL_EXTERN atomic_t bufAllocCount;    /* current number allocated  */
+ #ifdef CONFIG_CIFS_STATS2
+-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
+-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
+ #endif
+ GLOBAL_EXTERN atomic_t smBufAllocCount;
+ GLOBAL_EXTERN atomic_t midCount;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 605438a..b8185c2 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1386,7 +1386,7 @@ cifs_free_llist(struct list_head *llist)
+ int
+ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+-                unsigned int xid)
++                const unsigned int xid)
+ {
+       int rc = 0, stored_rc;
+       int types[] = {LOCKING_ANDX_LARGE_FILES,
+@@ -2072,10 +2072,14 @@ static int cifs_writepages(struct address_space *mapping,
+               index = mapping->writeback_index; /* Start from prev offset */
+               end = -1;
+       } else {
+-              index = wbc->range_start >> PAGE_SHIFT;
+-              end = wbc->range_end >> PAGE_SHIFT;
+-              if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
++              if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+                       range_whole = true;
++                      index = 0;
++                      end = ULONG_MAX;
++              } else {
++                      index = wbc->range_start >> PAGE_SHIFT;
++                      end = wbc->range_end >> PAGE_SHIFT;
++              }
+               scanned = true;
+       }
+       server = cifs_sb_master_tcon(cifs_sb)->ses->server;
+@@ -2549,7 +2553,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
+               wdata->pid = pid;
+               wdata->bytes = cur_len;
+               wdata->pagesz = PAGE_SIZE;
+-              wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
++              wdata->tailsz = cur_len - nr_pages * PAGE_SIZE + PAGE_SIZE;
+               wdata->credits = credits;
+               if (!wdata->cfile->invalidHandle ||
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index c672915..bea28bc 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -171,7 +171,7 @@ cifs_buf_get(void)
+               memset(ret_buf, 0, buf_size + 3);
+               atomic_inc(&bufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+-              atomic_inc(&totBufAllocCount);
++              atomic_inc_unchecked(&totBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+       }
+@@ -206,7 +206,7 @@ cifs_small_buf_get(void)
+       /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+               atomic_inc(&smBufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+-              atomic_inc(&totSmBufAllocCount);
++              atomic_inc_unchecked(&totSmBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+       }
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index fc537c2..47d654c 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -622,27 +622,27 @@ static void
+ cifs_clear_stats(struct cifs_tcon *tcon)
+ {
+ #ifdef CONFIG_CIFS_STATS
+-      atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
+-      atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
++      atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
+ #endif
+ }
+@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+ {
+ #ifdef CONFIG_CIFS_STATS
+       seq_printf(m, " Oplocks breaks: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
+       seq_printf(m, "\nReads:  %d Bytes: %llu",
+-                 atomic_read(&tcon->stats.cifs_stats.num_reads),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
+                  (long long)(tcon->bytes_read));
+       seq_printf(m, "\nWrites: %d Bytes: %llu",
+-                 atomic_read(&tcon->stats.cifs_stats.num_writes),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
+                  (long long)(tcon->bytes_written));
+       seq_printf(m, "\nFlushes: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_flushes));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
+       seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_locks),
+-                 atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
+-                 atomic_read(&tcon->stats.cifs_stats.num_symlinks));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
+       seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_opens),
+-                 atomic_read(&tcon->stats.cifs_stats.num_closes),
+-                 atomic_read(&tcon->stats.cifs_stats.num_deletes));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
+       seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_posixopens),
+-                 atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
+       seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
+-                 atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
+       seq_printf(m, "\nRenames: %d T2 Renames %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_renames),
+-                 atomic_read(&tcon->stats.cifs_stats.num_t2renames));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
+       seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
+-                 atomic_read(&tcon->stats.cifs_stats.num_ffirst),
+-                 atomic_read(&tcon->stats.cifs_stats.num_fnext),
+-                 atomic_read(&tcon->stats.cifs_stats.num_fclose));
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
++                 atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
+ #endif
+ }
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 0e73cef..e4dba34 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -427,8 +427,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
+ #ifdef CONFIG_CIFS_STATS
+       int i;
+       for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
+-              atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+-              atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
++              atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
++              atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
+       }
+ #endif
+ }
+@@ -468,65 +468,65 @@ static void
+ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+ {
+ #ifdef CONFIG_CIFS_STATS
+-      atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
+-      atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
++      atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
++      atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
+       seq_printf(m, "\nNegotiates: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_NEGOTIATE_HE]),
+-                 atomic_read(&failed[SMB2_NEGOTIATE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
+       seq_printf(m, "\nSessionSetups: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
+-                 atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
++                 atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
++                 atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
+       seq_printf(m, "\nLogoffs: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_LOGOFF_HE]),
+-                 atomic_read(&failed[SMB2_LOGOFF_HE]));
++                 atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
++                 atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
+       seq_printf(m, "\nTreeConnects: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
+-                 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
++                 atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
++                 atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
+       seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
+-                 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
++                 atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
++                 atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
+       seq_printf(m, "\nCreates: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CREATE_HE]),
+-                 atomic_read(&failed[SMB2_CREATE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
+       seq_printf(m, "\nCloses: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CLOSE_HE]),
+-                 atomic_read(&failed[SMB2_CLOSE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
+       seq_printf(m, "\nFlushes: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_FLUSH_HE]),
+-                 atomic_read(&failed[SMB2_FLUSH_HE]));
++                 atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
++                 atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
+       seq_printf(m, "\nReads: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_READ_HE]),
+-                 atomic_read(&failed[SMB2_READ_HE]));
++                 atomic_read_unchecked(&sent[SMB2_READ_HE]),
++                 atomic_read_unchecked(&failed[SMB2_READ_HE]));
+       seq_printf(m, "\nWrites: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_WRITE_HE]),
+-                 atomic_read(&failed[SMB2_WRITE_HE]));
++                 atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
++                 atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
+       seq_printf(m, "\nLocks: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_LOCK_HE]),
+-                 atomic_read(&failed[SMB2_LOCK_HE]));
++                 atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
++                 atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
+       seq_printf(m, "\nIOCTLs: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_IOCTL_HE]),
+-                 atomic_read(&failed[SMB2_IOCTL_HE]));
++                 atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
++                 atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
+       seq_printf(m, "\nCancels: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CANCEL_HE]),
+-                 atomic_read(&failed[SMB2_CANCEL_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
+       seq_printf(m, "\nEchos: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_ECHO_HE]),
+-                 atomic_read(&failed[SMB2_ECHO_HE]));
++                 atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
++                 atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
+       seq_printf(m, "\nQueryDirectories: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
+-                 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
++                 atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
++                 atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
+       seq_printf(m, "\nChangeNotifies: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
+-                 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
++                 atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
++                 atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
+       seq_printf(m, "\nQueryInfos: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
+-                 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
++                 atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
++                 atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
+       seq_printf(m, "\nSetInfos: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_SET_INFO_HE]),
+-                 atomic_read(&failed[SMB2_SET_INFO_HE]));
++                 atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
++                 atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
+       seq_printf(m, "\nOplockBreaks: %d sent %d failed",
+-                 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
+-                 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
++                 atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
++                 atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
+ #endif
+ }
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 3eec96c..b0c5b76 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2430,8 +2430,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+       default:
+               cifs_dbg(VFS, "info level %u isn't supported\n",
+                        srch_inf->info_level);
+-              rc = -EINVAL;
+-              goto qdir_exit;
++              return -EINVAL;
+       }
+       req->FileIndex = cpu_to_le32(index);
+diff --git a/fs/coda/cache.c b/fs/coda/cache.c
+index 5bb630a..043dc70 100644
+--- a/fs/coda/cache.c
++++ b/fs/coda/cache.c
+@@ -24,7 +24,7 @@
+ #include "coda_linux.h"
+ #include "coda_cache.h"
+-static atomic_t permission_epoch = ATOMIC_INIT(0);
++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
+ /* replace or extend an acl cache hit */
+ void coda_cache_enter(struct inode *inode, int mask)
+@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
+       struct coda_inode_info *cii = ITOC(inode);
+       spin_lock(&cii->c_lock);
+-      cii->c_cached_epoch = atomic_read(&permission_epoch);
++      cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
+       if (!uid_eq(cii->c_uid, current_fsuid())) {
+               cii->c_uid = current_fsuid();
+                 cii->c_cached_perm = mask;
+@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
+ {
+       struct coda_inode_info *cii = ITOC(inode);
+       spin_lock(&cii->c_lock);
+-      cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
++      cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
+       spin_unlock(&cii->c_lock);
+ }
+ /* remove all acl caches */
+ void coda_cache_clear_all(struct super_block *sb)
+ {
+-      atomic_inc(&permission_epoch);
++      atomic_inc_unchecked(&permission_epoch);
+ }
+@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
+       spin_lock(&cii->c_lock);
+       hit = (mask & cii->c_cached_perm) == mask &&
+           uid_eq(cii->c_uid, current_fsuid()) &&
+-          cii->c_cached_epoch == atomic_read(&permission_epoch);
++          cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
+       spin_unlock(&cii->c_lock);
+       return hit;
+diff --git a/fs/coda/dir.c b/fs/coda/dir.c
+index 6fb8672..da34e6a 100644
+--- a/fs/coda/dir.c
++++ b/fs/coda/dir.c
+@@ -29,11 +29,10 @@
+ #include "coda_int.h"
+ /* same as fs/bad_inode.c */
+-static int coda_return_EIO(void)
++static int coda_mknod(struct inode *inode, struct dentry *dentry, umode_t mode, dev_t dev)
+ {
+       return -EIO;
+ }
+-#define CODA_EIO_ERROR ((void *) (coda_return_EIO))
+ /* inode operations for directories */
+ /* access routines: lookup, readlink, permission */
+@@ -568,7 +567,7 @@ const struct inode_operations coda_dir_inode_operations = {
+       .symlink        = coda_symlink,
+       .mkdir          = coda_mkdir,
+       .rmdir          = coda_rmdir,
+-      .mknod          = CODA_EIO_ERROR,
++      .mknod          = coda_mknod,
+       .rename         = coda_rename,
+       .permission     = coda_permission,
+       .getattr        = coda_getattr,
+diff --git a/fs/compat.c b/fs/compat.c
+index be6e48b..f7baebf 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -54,7 +54,7 @@
+ #include <asm/ioctls.h>
+ #include "internal.h"
+-int compat_log = 1;
++int compat_log = 0;
+ int compat_printk(const char *fmt, ...)
+ {
+@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
+       set_fs(KERNEL_DS);
+       /* The __user pointer cast is valid because of the set_fs() */
+-      ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
++      ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
+       set_fs(oldfs);
+       /* truncating is ok because it's a user address */
+       if (!ret)
+@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
+               goto out;
+       ret = -EINVAL;
+-      if (nr_segs > UIO_MAXIOV || nr_segs < 0)
++      if (nr_segs > UIO_MAXIOV)
+               goto out;
+       if (nr_segs > fast_segs) {
+               ret = -ENOMEM;
+@@ -843,6 +843,7 @@ struct compat_old_linux_dirent {
+ struct compat_readdir_callback {
+       struct dir_context ctx;
+       struct compat_old_linux_dirent __user *dirent;
++      struct file * file;
+       int result;
+ };
+@@ -862,6 +863,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
+               buf->result = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       buf->result++;
+       dirent = buf->dirent;
+       if (!access_ok(VERIFY_WRITE, dirent,
+@@ -893,6 +898,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+       if (!f.file)
+               return -EBADF;
++      buf.file = f.file;
+       error = iterate_dir(f.file, &buf.ctx);
+       if (buf.result)
+               error = buf.result;
+@@ -912,6 +918,7 @@ struct compat_getdents_callback {
+       struct dir_context ctx;
+       struct compat_linux_dirent __user *current_dir;
+       struct compat_linux_dirent __user *previous;
++      struct file * file;
+       int count;
+       int error;
+ };
+@@ -934,6 +941,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
+               buf->error = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+               if (signal_pending(current))
+@@ -981,6 +992,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
+       if (!f.file)
+               return -EBADF;
++      buf.file = f.file;
+       error = iterate_dir(f.file, &buf.ctx);
+       if (error >= 0)
+               error = buf.error;
+@@ -1001,6 +1013,7 @@ struct compat_getdents_callback64 {
+       struct dir_context ctx;
+       struct linux_dirent64 __user *current_dir;
+       struct linux_dirent64 __user *previous;
++      struct file * file;
+       int count;
+       int error;
+ };
+@@ -1019,6 +1032,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
+       buf->error = -EINVAL;   /* only used if we fail.. */
+       if (reclen > buf->count)
+               return -EINVAL;
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+@@ -1070,13 +1087,13 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+       if (!f.file)
+               return -EBADF;
++      buf.file = f.file;
+       error = iterate_dir(f.file, &buf.ctx);
+       if (error >= 0)
+               error = buf.error;
+       lastdirent = buf.previous;
+       if (lastdirent) {
+-              typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+-              if (__put_user_unaligned(d_off, &lastdirent->d_off))
++              if (__put_user_unaligned(buf.ctx.pos, &lastdirent->d_off))
+                       error = -EFAULT;
+               else
+                       error = count - buf.count;
+@@ -1331,7 +1348,7 @@ COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
+ }
+ struct compat_sel_arg_struct {
+-      compat_ulong_t n;
++      compat_long_t n;
+       compat_uptr_t inp;
+       compat_uptr_t outp;
+       compat_uptr_t exp;
+diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
+index 4d24d17..4f8c09e 100644
+--- a/fs/compat_binfmt_elf.c
++++ b/fs/compat_binfmt_elf.c
+@@ -30,11 +30,13 @@
+ #undef        elf_phdr
+ #undef        elf_shdr
+ #undef        elf_note
++#undef        elf_dyn
+ #undef        elf_addr_t
+ #define elfhdr                elf32_hdr
+ #define elf_phdr      elf32_phdr
+ #define elf_shdr      elf32_shdr
+ #define elf_note      elf32_note
++#define elf_dyn               Elf32_Dyn
+ #define elf_addr_t    Elf32_Addr
+ /*
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index c1e9f29..4ea2e52 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -646,7 +646,7 @@ static int serial_struct_ioctl(struct file *file,
+               if (copy_in_user(ss, ss32, offsetof(SS32, iomem_base)) ||
+                   get_user(udata, &ss32->iomem_base))
+                       return -EFAULT;
+-              iomem_base = compat_ptr(udata);
++              iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
+               if (put_user(iomem_base, &ss->iomem_base) ||
+                   convert_in_user(&ss32->iomem_reg_shift,
+                     &ss->iomem_reg_shift) ||
+@@ -728,8 +728,8 @@ static int do_i2c_rdwr_ioctl(struct file *file,
+       for (i = 0; i < nmsgs; i++) {
+               if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
+                       return -EFAULT;
+-              if (get_user(datap, &umsgs[i].buf) ||
+-                  put_user(compat_ptr(datap), &tmsgs[i].buf))
++              if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
++                  put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
+                       return -EFAULT;
+       }
+       return do_ioctl(file, cmd, (unsigned long)tdata);
+@@ -820,7 +820,7 @@ static int compat_ioctl_preallocate(struct file *file,
+           copy_in_user(&p->l_len,     &p32->l_len,    sizeof(s64)) ||
+           copy_in_user(&p->l_sysid,   &p32->l_sysid,  sizeof(s32)) ||
+           copy_in_user(&p->l_pid,     &p32->l_pid,    sizeof(u32)) ||
+-          copy_in_user(&p->l_pad,     &p32->l_pad,    4*sizeof(u32)))
++          copy_in_user(p->l_pad,      p32->l_pad,     4*sizeof(u32)))
+               return -EFAULT;
+       return ioctl_preallocate(file, p);
+@@ -1629,8 +1629,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+ static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
+ {
+       unsigned int a, b;
+-      a = *(unsigned int *)p;
+-      b = *(unsigned int *)q;
++      a = *(const unsigned int *)p;
++      b = *(const unsigned int *)q;
+       if (a > b)
+               return 1;
+       if (a < b)
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 56fb261..8c808f1 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -1638,7 +1638,7 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
+               list_move(q, &parent_sd->s_children);
+       for (p = q->next; p != &parent_sd->s_children; p = p->next) {
+               struct configfs_dirent *next;
+-              const char *name;
++              const unsigned char * name;
+               int len;
+               struct inode *inode = NULL;
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 281b768..f39dcdf 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -483,8 +483,8 @@ static void wait_for_dump_helpers(struct file *file)
+       struct pipe_inode_info *pipe = file->private_data;
+       pipe_lock(pipe);
+-      pipe->readers++;
+-      pipe->writers--;
++      atomic_inc(&pipe->readers);
++      atomic_dec(&pipe->writers);
+       wake_up_interruptible_sync(&pipe->wait);
+       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+       pipe_unlock(pipe);
+@@ -493,11 +493,11 @@ static void wait_for_dump_helpers(struct file *file)
+        * We actually want wait_event_freezable() but then we need
+        * to clear TIF_SIGPENDING and improve dump_interrupted().
+        */
+-      wait_event_interruptible(pipe->wait, pipe->readers == 1);
++      wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
+       pipe_lock(pipe);
+-      pipe->readers--;
+-      pipe->writers++;
++      atomic_dec(&pipe->readers);
++      atomic_inc(&pipe->writers);
+       pipe_unlock(pipe);
+ }
+@@ -544,7 +544,9 @@ void do_coredump(const siginfo_t *siginfo)
+       /* require nonrelative corefile path and be extra careful */
+       bool need_suid_safe = false;
+       bool core_dumped = false;
+-      static atomic_t core_dump_count = ATOMIC_INIT(0);
++      static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
++      long signr = siginfo->si_signo;
++      int dumpable;
+       struct coredump_params cprm = {
+               .siginfo = siginfo,
+               .regs = signal_pt_regs(),
+@@ -557,12 +559,17 @@ void do_coredump(const siginfo_t *siginfo)
+               .mm_flags = mm->flags,
+       };
+-      audit_core_dumps(siginfo->si_signo);
++      audit_core_dumps(signr);
++
++      dumpable = __get_dumpable(cprm.mm_flags);
++
++      if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
++              gr_handle_brute_attach(dumpable);
+       binfmt = mm->binfmt;
+       if (!binfmt || !binfmt->core_dump)
+               goto fail;
+-      if (!__get_dumpable(cprm.mm_flags))
++      if (!dumpable)
+               goto fail;
+       cred = prepare_creds();
+@@ -580,7 +587,7 @@ void do_coredump(const siginfo_t *siginfo)
+               need_suid_safe = true;
+       }
+-      retval = coredump_wait(siginfo->si_signo, &core_state);
++      retval = coredump_wait(signr, &core_state);
+       if (retval < 0)
+               goto fail_creds;
+@@ -623,7 +630,7 @@ void do_coredump(const siginfo_t *siginfo)
+               }
+               cprm.limit = RLIM_INFINITY;
+-              dump_count = atomic_inc_return(&core_dump_count);
++              dump_count = atomic_inc_return_unchecked(&core_dump_count);
+               if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+                       printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+                              task_tgid_vnr(current), current->comm);
+@@ -657,6 +664,8 @@ void do_coredump(const siginfo_t *siginfo)
+               int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
+                                O_LARGEFILE | O_EXCL;
++              gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
++
+               if (cprm.limit < binfmt->min_coredump)
+                       goto fail_unlock;
+@@ -682,7 +691,7 @@ void do_coredump(const siginfo_t *siginfo)
+                        * If it doesn't exist, that's fine. If there's some
+                        * other problem, we'll catch it at the filp_open().
+                        */
+-                      (void) sys_unlink((const char __user *)cn.corename);
++                      (void) sys_unlink((const char __force_user *)cn.corename);
+                       set_fs(old_fs);
+               }
+@@ -763,7 +772,7 @@ close_fail:
+               filp_close(cprm.file, NULL);
+ fail_dropcount:
+       if (ispipe)
+-              atomic_dec(&core_dump_count);
++              atomic_dec_unchecked(&core_dump_count);
+ fail_unlock:
+       kfree(cn.corename);
+       coredump_finish(mm, core_dumped);
+@@ -784,6 +793,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
+       struct file *file = cprm->file;
+       loff_t pos = file->f_pos;
+       ssize_t n;
++
++      gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
+       if (cprm->written + nr > cprm->limit)
+               return 0;
+       while (nr) {
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 5c7cc95..58840d7 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -321,8 +321,9 @@ static void dentry_free(struct dentry *dentry)
+  * d_iput() operation if defined.
+  */
+ static void dentry_unlink_inode(struct dentry * dentry)
+-      __releases(dentry->d_lock)
+-      __releases(dentry->d_inode->i_lock)
++      __releases(&dentry->d_lock)
++      __releases(&dentry->d_inode->i_lock);
++static void dentry_unlink_inode(struct dentry * dentry)
+ {
+       struct inode *inode = dentry->d_inode;
+       bool hashed = !d_unhashed(dentry);
+@@ -559,7 +560,8 @@ static void __dentry_kill(struct dentry *dentry)
+  * Returns dentry requiring refcount drop, or NULL if we're done.
+  */
+ static struct dentry *dentry_kill(struct dentry *dentry)
+-      __releases(dentry->d_lock)
++      __releases(&dentry->d_lock);
++static struct dentry *dentry_kill(struct dentry *dentry)
+ {
+       struct inode *inode = dentry->d_inode;
+       struct dentry *parent = NULL;
+@@ -589,7 +591,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
+       struct dentry *parent = dentry->d_parent;
+       if (IS_ROOT(dentry))
+               return NULL;
+-      if (unlikely(dentry->d_lockref.count < 0))
++      if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
+               return NULL;
+       if (likely(spin_trylock(&parent->d_lock)))
+               return parent;
+@@ -651,8 +653,8 @@ static inline bool fast_dput(struct dentry *dentry)
+        */
+       if (unlikely(ret < 0)) {
+               spin_lock(&dentry->d_lock);
+-              if (dentry->d_lockref.count > 1) {
+-                      dentry->d_lockref.count--;
++              if (__lockref_read(&dentry->d_lockref) > 1) {
++                      __lockref_dec(&dentry->d_lockref);
+                       spin_unlock(&dentry->d_lock);
+                       return 1;
+               }
+@@ -707,7 +709,7 @@ static inline bool fast_dput(struct dentry *dentry)
+        * else could have killed it and marked it dead. Either way, we
+        * don't need to do anything else.
+        */
+-      if (dentry->d_lockref.count) {
++      if (__lockref_read(&dentry->d_lockref)) {
+               spin_unlock(&dentry->d_lock);
+               return 1;
+       }
+@@ -717,7 +719,7 @@ static inline bool fast_dput(struct dentry *dentry)
+        * lock, and we just tested that it was zero, so we can just
+        * set it to 1.
+        */
+-      dentry->d_lockref.count = 1;
++      __lockref_set(&dentry->d_lockref, 1);
+       return 0;
+ }
+@@ -754,8 +756,6 @@ void dput(struct dentry *dentry)
+               return;
+ repeat:
+-      might_sleep();
+-
+       rcu_read_lock();
+       if (likely(fast_dput(dentry))) {
+               rcu_read_unlock();
+@@ -783,7 +783,7 @@ repeat:
+               dentry->d_flags |= DCACHE_REFERENCED;
+       dentry_lru_add(dentry);
+-      dentry->d_lockref.count--;
++      __lockref_dec(&dentry->d_lockref);
+       spin_unlock(&dentry->d_lock);
+       return;
+@@ -800,7 +800,7 @@ EXPORT_SYMBOL(dput);
+ /* This must be called with d_lock held */
+ static inline void __dget_dlock(struct dentry *dentry)
+ {
+-      dentry->d_lockref.count++;
++      __lockref_inc(&dentry->d_lockref);
+ }
+ static inline void __dget(struct dentry *dentry)
+@@ -841,8 +841,8 @@ repeat:
+               goto repeat;
+       }
+       rcu_read_unlock();
+-      BUG_ON(!ret->d_lockref.count);
+-      ret->d_lockref.count++;
++      BUG_ON(!__lockref_read(&ret->d_lockref));
++      __lockref_inc(&ret->d_lockref);
+       spin_unlock(&ret->d_lock);
+       return ret;
+ }
+@@ -920,9 +920,9 @@ restart:
+       spin_lock(&inode->i_lock);
+       hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+               spin_lock(&dentry->d_lock);
+-              if (!dentry->d_lockref.count) {
++              if (!__lockref_read(&dentry->d_lockref)) {
+                       struct dentry *parent = lock_parent(dentry);
+-                      if (likely(!dentry->d_lockref.count)) {
++                      if (likely(!__lockref_read(&dentry->d_lockref))) {
+                               __dentry_kill(dentry);
+                               dput(parent);
+                               goto restart;
+@@ -957,7 +957,7 @@ static void shrink_dentry_list(struct list_head *list)
+                * We found an inuse dentry which was not removed from
+                * the LRU because of laziness during lookup. Do not free it.
+                */
+-              if (dentry->d_lockref.count > 0) {
++              if (__lockref_read(&dentry->d_lockref) > 0) {
+                       spin_unlock(&dentry->d_lock);
+                       if (parent)
+                               spin_unlock(&parent->d_lock);
+@@ -995,8 +995,8 @@ static void shrink_dentry_list(struct list_head *list)
+               dentry = parent;
+               while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
+                       parent = lock_parent(dentry);
+-                      if (dentry->d_lockref.count != 1) {
+-                              dentry->d_lockref.count--;
++                      if (__lockref_read(&dentry->d_lockref) != 1) {
++                              __lockref_inc(&dentry->d_lockref);
+                               spin_unlock(&dentry->d_lock);
+                               if (parent)
+                                       spin_unlock(&parent->d_lock);
+@@ -1036,7 +1036,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
+        * counts, just remove them from the LRU. Otherwise give them
+        * another pass through the LRU.
+        */
+-      if (dentry->d_lockref.count) {
++      if (__lockref_read(&dentry->d_lockref)) {
+               d_lru_isolate(lru, dentry);
+               spin_unlock(&dentry->d_lock);
+               return LRU_REMOVED;
+@@ -1373,7 +1373,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+       } else {
+               if (dentry->d_flags & DCACHE_LRU_LIST)
+                       d_lru_del(dentry);
+-              if (!dentry->d_lockref.count) {
++              if (!__lockref_read(&dentry->d_lockref)) {
+                       d_shrink_add(dentry, &data->dispose);
+                       data->found++;
+               }
+@@ -1421,7 +1421,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+               return D_WALK_CONTINUE;
+       /* root with refcount 1 is fine */
+-      if (dentry == _data && dentry->d_lockref.count == 1)
++      if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
+               return D_WALK_CONTINUE;
+       printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+@@ -1430,7 +1430,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+                      dentry->d_inode ?
+                      dentry->d_inode->i_ino : 0UL,
+                      dentry,
+-                     dentry->d_lockref.count,
++                     __lockref_read(&dentry->d_lockref),
+                      dentry->d_sb->s_type->name,
+                      dentry->d_sb->s_id);
+       WARN_ON(1);
+@@ -1576,7 +1576,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+               dname = dentry->d_iname;
+       } else if (name->len > DNAME_INLINE_LEN-1) {
+               size_t size = offsetof(struct external_name, name[1]);
+-              struct external_name *p = kmalloc(size + name->len,
++              struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)),
+                                                 GFP_KERNEL_ACCOUNT);
+               if (!p) {
+                       kmem_cache_free(dentry_cache, dentry); 
+@@ -1600,7 +1600,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+       smp_wmb();
+       dentry->d_name.name = dname;
+-      dentry->d_lockref.count = 1;
++      __lockref_set(&dentry->d_lockref, 1);
+       dentry->d_flags = 0;
+       spin_lock_init(&dentry->d_lock);
+       seqcount_init(&dentry->d_seq);
+@@ -1609,6 +1609,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+       dentry->d_sb = sb;
+       dentry->d_op = NULL;
+       dentry->d_fsdata = NULL;
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      atomic_set(&dentry->chroot_refcnt, 0);
++#endif
+       INIT_HLIST_BL_NODE(&dentry->d_hash);
+       INIT_LIST_HEAD(&dentry->d_lru);
+       INIT_LIST_HEAD(&dentry->d_subdirs);
+@@ -2250,7 +2253,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
+               if (!d_same_name(dentry, parent, name))
+                       goto next;
+-              dentry->d_lockref.count++;
++              __lockref_inc(&dentry->d_lockref);
+               found = dentry;
+               spin_unlock(&dentry->d_lock);
+               break;
+@@ -2318,7 +2321,7 @@ again:
+       spin_lock(&dentry->d_lock);
+       inode = dentry->d_inode;
+       isdir = S_ISDIR(inode->i_mode);
+-      if (dentry->d_lockref.count == 1) {
++      if (__lockref_read(&dentry->d_lockref) == 1) {
+               if (!spin_trylock(&inode->i_lock)) {
+                       spin_unlock(&dentry->d_lock);
+                       cpu_relax();
+@@ -3500,7 +3503,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+               if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+                       dentry->d_flags |= DCACHE_GENOCIDE;
+-                      dentry->d_lockref.count--;
++                      __lockref_dec(&dentry->d_lockref);
+               }
+       }
+       return D_WALK_CONTINUE;
+@@ -3571,8 +3574,8 @@ static void __init dcache_init(void)
+        * but it is probably not worth it because of the cache nature
+        * of the dcache. 
+        */
+-      dentry_cache = KMEM_CACHE(dentry,
+-              SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
++      dentry_cache = KMEM_CACHE_USERCOPY(dentry,
++              SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT, d_iname);
+       /* Hash may have been set up in dcache_init_early */
+       if (!hashdist)
+@@ -3607,8 +3610,9 @@ void __init vfs_caches_init_early(void)
+ void __init vfs_caches_init(void)
+ {
+-      names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+-                      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++      names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
++                      SLAB_HWCACHE_ALIGN|SLAB_PANIC| SLAB_NO_SANITIZE,
++                      0, PATH_MAX, NULL);
+       dcache_init();
+       inode_init();
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index 309f4e9..6747d80 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -209,7 +209,7 @@ static int full_proxy_release(struct inode *inode, struct file *filp)
+       return 0;
+ }
+-static void __full_proxy_fops_init(struct file_operations *proxy_fops,
++static void __full_proxy_fops_init(file_operations_no_const *proxy_fops,
+                               const struct file_operations *real_fops)
+ {
+       proxy_fops->release = full_proxy_release;
+@@ -229,7 +229,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
+ {
+       const struct dentry *dentry = F_DENTRY(filp);
+       const struct file_operations *real_fops = NULL;
+-      struct file_operations *proxy_fops = NULL;
++      file_operations_no_const *proxy_fops = NULL;
+       int srcu_idx, r;
+       r = debugfs_use_file_start(dentry, &srcu_idx);
+@@ -734,6 +734,43 @@ struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+ }
+ EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);
++static int debugfs_atomic_unchecked_t_set(void *data, u64 val)
++{
++      atomic_set_unchecked((atomic_unchecked_t *)data, val);
++      return 0;
++}
++static int debugfs_atomic_unchecked_t_get(void *data, u64 *val)
++{
++      *val = atomic_read_unchecked((atomic_unchecked_t *)data);
++      return 0;
++}
++DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_unchecked_t, debugfs_atomic_unchecked_t_get,
++                      debugfs_atomic_unchecked_t_set, "%lld\n");
++DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_unchecked_t_ro, debugfs_atomic_unchecked_t_get, NULL,
++                      "%lld\n");
++DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_unchecked_t_wo, NULL, debugfs_atomic_unchecked_t_set,
++                      "%lld\n");
++
++/**
++ * debugfs_create_atomic_unchecked_t - create a debugfs file that is used to read and
++ * write an atomic_unchecked_t value
++ * @name: a pointer to a string containing the name of the file to create.
++ * @mode: the permission that the file should have
++ * @parent: a pointer to the parent dentry for this file.  This should be a
++ *          directory dentry if set.  If this parameter is %NULL, then the
++ *          file will be created in the root of the debugfs filesystem.
++ * @value: a pointer to the variable that the file should read to and write
++ *         from.
++ */
++struct dentry *debugfs_create_atomic_unchecked_t(const char *name, umode_t mode,
++                               struct dentry *parent, atomic_unchecked_t *value)
++{
++      return debugfs_create_mode_unsafe(name, mode, parent, value,
++                                      &fops_atomic_unchecked_t, &fops_atomic_unchecked_t_ro,
++                                      &fops_atomic_unchecked_t_wo);
++}
++EXPORT_SYMBOL_GPL(debugfs_create_atomic_unchecked_t);
++
+ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
+                              size_t count, loff_t *ppos)
+ {
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 72361ba..21d833e 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -253,6 +253,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
+       struct dentry *dentry;
+       int error;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      return ERR_PTR(-ENODEV);
++#endif
++
+       pr_debug("debugfs: creating file '%s'\n",name);
+       if (IS_ERR(parent))
+@@ -466,6 +470,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
+  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+  * returned.
+  */
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++extern int grsec_enable_sysfs_restrict;
++#endif
++
+ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+ {
+       struct dentry *dentry = start_creating(name, parent);
+@@ -478,7 +486,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+       if (unlikely(!inode))
+               return failed_creating(dentry);
+-      inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++      if (grsec_enable_sysfs_restrict)
++              inode->i_mode = S_IFDIR | S_IRWXU;
++      else
++#endif
++              inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+       inode->i_op = &simple_dir_inode_operations;
+       inode->i_fop = &simple_dir_operations;
+@@ -782,6 +795,10 @@ static int __init debugfs_init(void)
+ {
+       int retval;
++#ifdef CONFIG_GRKERNSEC_KMEM
++      return -ENOSYS;
++#endif
++
+       retval = sysfs_create_mount_point(kernel_kobj, "debug");
+       if (retval)
+               return retval;
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 9d153b6..60baa46 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -639,7 +639,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
+       old_fs = get_fs();
+       set_fs(get_ds());
+       rc = d_inode(lower_dentry)->i_op->readlink(lower_dentry,
+-                                                 (char __user *)lower_buf,
++                                                 (char __force_user *)lower_buf,
+                                                  PATH_MAX);
+       set_fs(old_fs);
+       if (rc < 0)
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index e4141f2..d8263e8 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -304,7 +304,7 @@ check_list:
+               goto out_unlock_msg_ctx;
+       i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
+       if (msg_ctx->msg) {
+-              if (copy_to_user(&buf[i], packet_length, packet_length_size))
++              if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
+                       goto out_unlock_msg_ctx;
+               i += packet_length_size;
+               if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
+diff --git a/fs/exec.c b/fs/exec.c
+index 6fcfb3f..840422d2 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -57,8 +57,20 @@
+ #include <linux/oom.h>
+ #include <linux/compat.h>
+ #include <linux/vmalloc.h>
++#include <linux/random.h>
++#include <linux/seq_file.h>
++#include <linux/coredump.h>
++#include <linux/mman.h>
++
++#ifdef CONFIG_PAX_REFCOUNT
++#include <linux/kallsyms.h>
++#include <linux/kdebug.h>
++#endif
++
++#include <trace/events/fs.h>
+ #include <asm/uaccess.h>
++#include <asm/sections.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
+@@ -67,19 +79,34 @@
+ #include <trace/events/sched.h>
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void __weak pax_set_initial_flags(struct linux_binprm *bprm)
++{
++      pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
++}
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++EXPORT_SYMBOL(pax_set_initial_flags_func);
++#endif
++
+ int suid_dumpable = 0;
+ static LIST_HEAD(formats);
+ static DEFINE_RWLOCK(binfmt_lock);
++extern int gr_process_kernel_exec_ban(void);
++extern int gr_process_sugid_exec_ban(const struct linux_binprm *bprm);
++
+ void __register_binfmt(struct linux_binfmt * fmt, int insert)
+ {
+       BUG_ON(!fmt);
+       if (WARN_ON(!fmt->load_binary))
+               return;
+       write_lock(&binfmt_lock);
+-      insert ? list_add(&fmt->lh, &formats) :
+-               list_add_tail(&fmt->lh, &formats);
++      insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
++               pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
+       write_unlock(&binfmt_lock);
+ }
+@@ -88,7 +115,7 @@ EXPORT_SYMBOL(__register_binfmt);
+ void unregister_binfmt(struct linux_binfmt * fmt)
+ {
+       write_lock(&binfmt_lock);
+-      list_del(&fmt->lh);
++      pax_list_del((struct list_head *)&fmt->lh);
+       write_unlock(&binfmt_lock);
+ }
+@@ -190,22 +217,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+               int write)
+ {
+       struct page *page;
+-      int ret;
+-#ifdef CONFIG_STACK_GROWSUP
+-      if (write) {
+-              ret = expand_downwards(bprm->vma, pos);
+-              if (ret < 0)
+-                      return NULL;
+-      }
+-#endif
++      if (0 > expand_downwards(bprm->vma, pos))
++              return NULL;
+       /*
+        * We are doing an exec().  'current' is the process
+        * doing the exec and bprm->mm is the new process's mm.
+        */
+-      ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
+-                      1, &page, NULL);
+-      if (ret <= 0)
++      if (0 >= get_user_pages_remote(current, bprm->mm, pos, 1, write,
++                      1, &page, NULL))
+               return NULL;
+       if (write) {
+@@ -221,6 +241,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+               if (size <= ARG_MAX)
+                       return page;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              // only allow 512KB for argv+env on suid/sgid binaries
++              // to prevent easy ASLR exhaustion
++              if (((!uid_eq(bprm->cred->euid, current_euid())) ||
++                   (!gid_eq(bprm->cred->egid, current_egid()))) &&
++                  (size > (512 * 1024))) {
++                      put_page(page);
++                      return NULL;
++              }
++#endif
++
+               /*
+                * Limit to 1/4-th the stack size for the argv+env strings.
+                * This ensures that:
+@@ -279,6 +310,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+       vma->vm_end = STACK_TOP_MAX;
+       vma->vm_start = vma->vm_end - PAGE_SIZE;
+       vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+@@ -290,6 +326,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+       arch_bprm_mm_init(mm, vma);
+       up_write(&mm->mmap_sem);
+       bprm->p = vma->vm_end - sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++      if (randomize_va_space)
++              bprm->p ^= prandom_u32() & ~PAGE_MASK;
++#endif
++
+       return 0;
+ err:
+       up_write(&mm->mmap_sem);
+@@ -407,7 +449,7 @@ struct user_arg_ptr {
+       } ptr;
+ };
+-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
++const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+ {
+       const char __user *native;
+@@ -416,14 +458,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+               compat_uptr_t compat;
+               if (get_user(compat, argv.ptr.compat + nr))
+-                      return ERR_PTR(-EFAULT);
++                      return (const char __force_user *)ERR_PTR(-EFAULT);
+               return compat_ptr(compat);
+       }
+ #endif
+       if (get_user(native, argv.ptr.native + nr))
+-              return ERR_PTR(-EFAULT);
++              return (const char __force_user *)ERR_PTR(-EFAULT);
+       return native;
+ }
+@@ -442,7 +484,7 @@ static int count(struct user_arg_ptr argv, int max)
+                       if (!p)
+                               break;
+-                      if (IS_ERR(p))
++                      if (IS_ERR((const char __force_kernel *)p))
+                               return -EFAULT;
+                       if (i >= max)
+@@ -477,7 +519,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
+               ret = -EFAULT;
+               str = get_user_arg_ptr(argv, argc);
+-              if (IS_ERR(str))
++              if (IS_ERR((const char __force_kernel *)str))
+                       goto out;
+               len = strnlen_user(str, MAX_ARG_STRLEN);
+@@ -559,7 +601,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
+       int r;
+       mm_segment_t oldfs = get_fs();
+       struct user_arg_ptr argv = {
+-              .ptr.native = (const char __user *const  __user *)__argv,
++              .ptr.native = (const char __user * const __force_user *)__argv,
+       };
+       set_fs(KERNEL_DS);
+@@ -594,7 +636,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+       unsigned long new_end = old_end - shift;
+       struct mmu_gather tlb;
+-      BUG_ON(new_start > new_end);
++      if (new_start >= new_end || new_start < mmap_min_addr)
++              return -ENOMEM;
+       /*
+        * ensure there are no vmas between where we want to go
+@@ -603,6 +646,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+       if (vma != find_vma(mm, new_start))
+               return -EFAULT;
++#ifdef CONFIG_PAX_SEGMEXEC
++      BUG_ON(pax_find_mirror_vma(vma));
++#endif
++
+       /*
+        * cover the whole range: [new_start, old_end)
+        */
+@@ -680,20 +727,16 @@ int setup_arg_pages(struct linux_binprm *bprm,
+       stack_base = PAGE_ALIGN(stack_top - stack_base);
+       stack_shift = vma->vm_start - stack_base;
+-      mm->arg_start = bprm->p - stack_shift;
++      mm->arg_end = mm->arg_start = bprm->p - stack_shift;
+       bprm->p = vma->vm_end - stack_shift;
+ #else
+       stack_top = arch_align_stack(stack_top);
+       stack_top = PAGE_ALIGN(stack_top);
+-      if (unlikely(stack_top < mmap_min_addr) ||
+-          unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+-              return -ENOMEM;
+-
+       stack_shift = vma->vm_end - stack_top;
+       bprm->p -= stack_shift;
+-      mm->arg_start = bprm->p;
++      mm->arg_end = mm->arg_start = bprm->p;
+ #endif
+       if (bprm->loader)
+@@ -703,8 +746,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
+       if (down_write_killable(&mm->mmap_sem))
+               return -EINTR;
++      /* Move stack pages down in memory. */
++      if (stack_shift) {
++              ret = shift_arg_pages(vma, stack_shift);
++              if (ret)
++                      goto out_unlock;
++      }
++
+       vm_flags = VM_STACK_FLAGS;
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (mm->pax_flags & MF_PAX_MPROTECT)
++                      vm_flags &= ~VM_MAYEXEC;
++#endif
++
++      }
++#endif
++
+       /*
+        * Adjust stack execute permissions; explicitly enable for
+        * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+@@ -723,13 +785,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+               goto out_unlock;
+       BUG_ON(prev != vma);
+-      /* Move stack pages down in memory. */
+-      if (stack_shift) {
+-              ret = shift_arg_pages(vma, stack_shift);
+-              if (ret)
+-                      goto out_unlock;
+-      }
+-
+       /* mprotect_fixup is overkill to remove the temporary stack flags */
+       vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+@@ -753,6 +808,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ #endif
+       current->mm->start_stack = bprm->p;
+       ret = expand_stack(vma, stack_base);
++
++#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
++      if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
++              unsigned long size;
++              vm_flags_t vm_flags;
++
++              size = STACK_TOP - vma->vm_end;
++              vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
++
++              ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
++
++#ifdef CONFIG_X86
++              if (!ret) {
++                      size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
++                      ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
++              }
++#endif
++
++      }
++#endif
++
+       if (ret)
+               ret = -EFAULT;
+@@ -801,6 +877,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+ {
+       struct file *file;
+       int err;
++      int unsafe_flags = 0;
+       struct open_flags open_exec_flags = {
+               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+               .acc_mode = MAY_EXEC,
+@@ -826,12 +903,22 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+       if (path_noexec(&file->f_path))
+               goto exit;
++      if (current->ptrace && !(current->ptrace & PT_PTRACE_CAP))
++              unsafe_flags = LSM_UNSAFE_PTRACE;
++
++      if (gr_ptrace_readexec(file, unsafe_flags)) {
++              err = -EPERM;
++              goto exit;
++      }
++
+       err = deny_write_access(file);
+       if (err)
+               goto exit;
+-      if (name->name[0] != '\0')
++      if (name->name[0] != '\0') {
+               fsnotify_open(file);
++              trace_open_exec(name->name);
++      }
+ out:
+       return file;
+@@ -861,10 +948,13 @@ int kernel_read(struct file *file, loff_t offset,
+       loff_t pos = offset;
+       int result;
++      if (count > INT_MAX)
++              return -EINVAL;
++
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      result = vfs_read(file, (void __user *)addr, count, &pos);
++      result = vfs_read(file, (void __force_user *)addr, count, &pos);
+       set_fs(old_fs);
+       return result;
+ }
+@@ -1424,7 +1514,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+       }
+       rcu_read_unlock();
+-      if (p->fs->users > n_fs)
++      if (atomic_read(&p->fs->users) > n_fs)
+               bprm->unsafe |= LSM_UNSAFE_SHARE;
+       else
+               p->fs->in_exec = 1;
+@@ -1627,6 +1717,31 @@ static int exec_binprm(struct linux_binprm *bprm)
+       return ret;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++static DEFINE_PER_CPU(u64, exec_counter);
++static int __init init_exec_counters(void)
++{
++      unsigned int cpu;
++
++      for_each_possible_cpu(cpu) {
++              per_cpu(exec_counter, cpu) = (u64)cpu;
++      }
++
++      return 0;
++}
++early_initcall(init_exec_counters);
++static inline void increment_exec_counter(void)
++{
++      BUILD_BUG_ON(NR_CPUS > (1 << 16));
++      current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
++}
++#else
++static inline void increment_exec_counter(void) {}
++#endif
++
++extern void gr_handle_exec_args(struct linux_binprm *bprm,
++                              struct user_arg_ptr argv);
++
+ /*
+  * sys_execve() executes a new program.
+  */
+@@ -1635,6 +1750,11 @@ static int do_execveat_common(int fd, struct filename *filename,
+                             struct user_arg_ptr envp,
+                             int flags)
+ {
++#ifdef CONFIG_GRKERNSEC
++      struct file *old_exec_file;
++      struct acl_subject_label *old_acl;
++      struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+       char *pathbuf = NULL;
+       struct linux_binprm *bprm;
+       struct file *file;
+@@ -1644,6 +1764,8 @@ static int do_execveat_common(int fd, struct filename *filename,
+       if (IS_ERR(filename))
+               return PTR_ERR(filename);
++      gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
++
+       /*
+        * We move the actual failure in case of RLIMIT_NPROC excess from
+        * set*uid() to execve() because too many poorly written programs
+@@ -1707,6 +1829,11 @@ static int do_execveat_common(int fd, struct filename *filename,
+       }
+       bprm->interp = bprm->filename;
++      if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
++              retval = -EACCES;
++              goto out_unmark;
++      }
++
+       retval = bprm_mm_init(bprm);
+       if (retval)
+               goto out_unmark;
+@@ -1723,24 +1850,70 @@ static int do_execveat_common(int fd, struct filename *filename,
+       if (retval < 0)
+               goto out;
++#ifdef CONFIG_GRKERNSEC
++      old_acl = current->acl;
++      memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++      old_exec_file = current->exec_file;
++      get_file(file);
++      current->exec_file = file;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      /* limit suid stack to 8MB
++       * we saved the old limits above and will restore them if this exec fails
++       */
++      if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
++          (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
++              current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
++#endif
++
++      if (gr_process_kernel_exec_ban() || gr_process_sugid_exec_ban(bprm)) {
++              retval = -EPERM;
++              goto out_fail;
++      }
++
++      if (!gr_tpe_allow(file)) {
++              retval = -EACCES;
++              goto out_fail;
++      }
++
++      if (gr_check_crash_exec(file)) {
++              retval = -EACCES;
++              goto out_fail;
++      }
++
++      retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
++                                      bprm->unsafe);
++      if (retval < 0)
++              goto out_fail;
++
+       retval = copy_strings_kernel(1, &bprm->filename, bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
+       bprm->exec = bprm->p;
+       retval = copy_strings(bprm->envc, envp, bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
+       retval = copy_strings(bprm->argc, argv, bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
++
++      gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
++
++      gr_handle_exec_args(bprm, argv);
+       retval = exec_binprm(bprm);
+       if (retval < 0)
+-              goto out;
++              goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++      if (old_exec_file)
++              fput(old_exec_file);
++#endif
+       /* execve succeeded */
++
++      increment_exec_counter();
+       current->fs->in_exec = 0;
+       current->in_execve = 0;
+       acct_update_integrals(current);
+@@ -1752,6 +1925,14 @@ static int do_execveat_common(int fd, struct filename *filename,
+               put_files_struct(displaced);
+       return retval;
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++      current->acl = old_acl;
++      memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++      fput(current->exec_file);
++      current->exec_file = old_exec_file;
++#endif
++
+ out:
+       if (bprm->mm) {
+               acct_arg_size(bprm, 0);
+@@ -1898,3 +2079,194 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
+                                 argv, envp, flags);
+ }
+ #endif
++
++int pax_check_flags(unsigned long *flags)
++{
++      int retval = 0;
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
++      if (*flags & MF_PAX_SEGMEXEC)
++      {
++              *flags &= ~MF_PAX_SEGMEXEC;
++              retval = -EINVAL;
++      }
++#endif
++
++      if ((*flags & MF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++          &&  (*flags & MF_PAX_SEGMEXEC)
++#endif
++
++         )
++      {
++              *flags &= ~MF_PAX_PAGEEXEC;
++              retval = -EINVAL;
++      }
++
++      if ((*flags & MF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++          && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++         )
++      {
++              *flags &= ~MF_PAX_MPROTECT;
++              retval = -EINVAL;
++      }
++
++      if ((*flags & MF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++          && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++         )
++      {
++              *flags &= ~MF_PAX_EMUTRAMP;
++              retval = -EINVAL;
++      }
++
++      return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++char *pax_get_path(const struct path *path, char *buf, int buflen)
++{
++      char *pathname = d_path(path, buf, buflen);
++
++      if (IS_ERR(pathname))
++              goto toolong;
++
++      pathname = mangle_path(buf, pathname, "\t\n\\");
++      if (!pathname)
++              goto toolong;
++
++      *pathname = 0;
++      return buf;
++
++toolong:
++      return "<path too long>";
++}
++EXPORT_SYMBOL(pax_get_path);
++
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++      struct task_struct *tsk = current;
++      struct mm_struct *mm = current->mm;
++      char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
++      char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
++      char *path_exec = NULL;
++      char *path_fault = NULL;
++      unsigned long start = 0UL, end = 0UL, offset = 0UL;
++      siginfo_t info = { };
++
++      if (buffer_exec && buffer_fault) {
++              struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
++
++              down_read(&mm->mmap_sem);
++              vma = mm->mmap;
++              while (vma && (!vma_exec || !vma_fault)) {
++                      if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
++                              vma_exec = vma;
++                      if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++                              vma_fault = vma;
++                      vma = vma->vm_next;
++              }
++              if (vma_exec)
++                      path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
++              if (vma_fault) {
++                      start = vma_fault->vm_start;
++                      end = vma_fault->vm_end;
++                      offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++                      if (vma_fault->vm_file)
++                              path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++                      else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
++                              path_fault = "<heap>";
++                      else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++                              path_fault = "<stack>";
++                      else
++                              path_fault = "<anonymous mapping>";
++              }
++              up_read(&mm->mmap_sem);
++      }
++      if (tsk->signal->curr_ip)
++              printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
++      else
++              printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++      printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
++                      from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
++      free_page((unsigned long)buffer_exec);
++      free_page((unsigned long)buffer_fault);
++      pax_report_insns(regs, pc, sp);
++      info.si_signo = SIGKILL;
++      info.si_errno = 0;
++      info.si_code = SI_KERNEL;
++      info.si_pid = 0;
++      info.si_uid = 0;
++      do_coredump(&info);
++}
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++static DEFINE_RATELIMIT_STATE(refcount_ratelimit, 15 * HZ, 3);
++
++void pax_report_refcount_error(struct pt_regs *regs, const char *kind)
++{
++      do_send_sig_info(SIGKILL, SEND_SIG_FORCED, current, true);
++
++      if (!__ratelimit(&refcount_ratelimit))
++              return;
++
++      if (current->signal->curr_ip)
++              pr_emerg("PAX: From %pI4: %s detected in: %s:%d, uid/euid: %u/%u\n",
++                       &current->signal->curr_ip,
++                       kind ? kind : "refcount error",
++                       current->comm, task_pid_nr(current),
++                       from_kuid_munged(&init_user_ns, current_uid()),
++                       from_kuid_munged(&init_user_ns, current_euid()));
++      else
++              pr_emerg("PAX: %s detected in: %s:%d, uid/euid: %u/%u\n",
++                       kind ? kind : "refcount error",
++                       current->comm, task_pid_nr(current),
++                       from_kuid_munged(&init_user_ns, current_uid()),
++                       from_kuid_munged(&init_user_ns, current_euid()));
++      print_symbol(KERN_EMERG "PAX: refcount error occured at: %s\n", instruction_pointer(regs));
++      preempt_disable();
++      show_regs(regs);
++      preempt_enable();
++}
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void __used pax_track_stack(void)
++{
++      unsigned long sp = (unsigned long)&sp;
++      if (sp < current_thread_info()->lowest_stack &&
++          sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
++              current_thread_info()->lowest_stack = sp;
++      if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
++              BUG();
++}
++EXPORT_SYMBOL(pax_track_stack);
++#endif
++
++#ifdef CONFIG_PAX_SIZE_OVERFLOW
++static DEFINE_RATELIMIT_STATE(size_overflow_ratelimit, 15 * HZ, 3);
++extern bool pax_size_overflow_report_only;
++
++void __nocapture(1, 3, 4) __used report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++{
++      if (!pax_size_overflow_report_only || __ratelimit(&size_overflow_ratelimit)) {
++              pr_emerg("PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
++              dump_stack();
++      }
++
++      if (!pax_size_overflow_report_only)
++              do_group_exit(SIGKILL);
++}
++EXPORT_SYMBOL(report_size_overflow);
++#endif
+diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
+index 9dc4c6d..ed7c0e7 100644
+--- a/fs/exofs/inode.c
++++ b/fs/exofs/inode.c
+@@ -470,6 +470,11 @@ fail:
+       return ret;
+ }
++static int readpage_filler(struct file *data, struct page *page)
++{
++      return readpage_strip(data, page);
++}
++
+ static int exofs_readpages(struct file *file, struct address_space *mapping,
+                          struct list_head *pages, unsigned nr_pages)
+ {
+@@ -478,7 +483,7 @@ static int exofs_readpages(struct file *file, struct address_space *mapping,
+       _pcol_init(&pcol, nr_pages, mapping->host);
+-      ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
++      ret = read_cache_pages(mapping, pages, readpage_filler, &pcol);
+       if (ret) {
+               EXOFS_ERR("read_cache_pages => %d\n", ret);
+               return ret;
+diff --git a/fs/exofs/super.c b/fs/exofs/super.c
+index 1076a42..54faf08 100644
+--- a/fs/exofs/super.c
++++ b/fs/exofs/super.c
+@@ -192,10 +192,11 @@ static void exofs_init_once(void *foo)
+  */
+ static int init_inodecache(void)
+ {
+-      exofs_inode_cachep = kmem_cache_create("exofs_inode_cache",
++      exofs_inode_cachep = kmem_cache_create_usercopy("exofs_inode_cache",
+                               sizeof(struct exofs_i_info), 0,
+                               SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
+-                              SLAB_ACCOUNT, exofs_init_once);
++                              SLAB_ACCOUNT, offsetof(struct exofs_i_info, i_data),
++                              sizeof(((struct exofs_i_info *)0)->i_data), exofs_init_once);
+       if (exofs_inode_cachep == NULL)
+               return -ENOMEM;
+       return 0;
+diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
+index 4c40c07..7345640 100644
+--- a/fs/ext2/balloc.c
++++ b/fs/ext2/balloc.c
+@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
+       free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+       root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+-      if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++      if (free_blocks < root_blocks + 1 &&
+               !uid_eq(sbi->s_resuid, current_fsuid()) &&
+               (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
+-               !in_group_p (sbi->s_resgid))) {
++               !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
+               return 0;
+       }
+       return 1;
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 1d93795..dbb5a08 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -203,10 +203,12 @@ static void init_once(void *foo)
+ static int __init init_inodecache(void)
+ {
+-      ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
++      ext2_inode_cachep = kmem_cache_create_usercopy("ext2_inode_cache",
+                                            sizeof(struct ext2_inode_info),
+                                            0, (SLAB_RECLAIM_ACCOUNT|
+                                               SLAB_MEM_SPREAD|SLAB_ACCOUNT),
++                                           offsetof(struct ext2_inode_info, i_data),
++                                           sizeof(((struct ext2_inode_info *)0)->i_data),
+                                            init_once);
+       if (ext2_inode_cachep == NULL)
+               return -ENOMEM;
+@@ -273,10 +275,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT2_FS_XATTR
+       if (test_opt(sb, XATTR_USER))
+               seq_puts(seq, ",user_xattr");
+-      if (!test_opt(sb, XATTR_USER) &&
+-          (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
++      if (!test_opt(sb, XATTR_USER))
+               seq_puts(seq, ",nouser_xattr");
+-      }
+ #endif
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+@@ -864,8 +864,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+       if (def_mount_opts & EXT2_DEFM_UID16)
+               set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT2_FS_XATTR
+-      if (def_mount_opts & EXT2_DEFM_XATTR_USER)
+-              set_opt(sbi->s_mount_opt, XATTR_USER);
++      /* always enable user xattrs */
++      set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+       if (def_mount_opts & EXT2_DEFM_ACL)
+diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
+index b7f896f..61d52fe 100644
+--- a/fs/ext2/xattr.c
++++ b/fs/ext2/xattr.c
+@@ -244,7 +244,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+       struct buffer_head *bh = NULL;
+       struct ext2_xattr_entry *entry;
+       char *end;
+-      size_t rest = buffer_size;
++      size_t rest = buffer_size, total_size = 0;
+       int error;
+       struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
+@@ -307,9 +307,10 @@ bad_block:        ext2_error(inode->i_sb, "ext2_xattr_list",
+                               *buffer++ = 0;
+                       }
+                       rest -= size;
++                      total_size += size;
+               }
+       }
+-      error = buffer_size - rest;  /* total size */
++      error = total_size;
+ cleanup:
+       brelse(bh);
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index e04ec86..953c3e6 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -566,8 +566,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+       /* Hm, nope.  Are (enough) root reserved clusters available? */
+       if (uid_eq(sbi->s_resuid, current_fsuid()) ||
+           (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
+-          capable(CAP_SYS_RESOURCE) ||
+-          (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
++          (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
++          capable_nolog(CAP_SYS_RESOURCE)) {
+               if (free_clusters >= (nclusters + dirty_clusters +
+                                     resv_clusters))
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index ea31931..2e49089 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1439,19 +1439,19 @@ struct ext4_sb_info {
+       unsigned long s_mb_last_start;
+       /* stats for buddy allocator */
+-      atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
+-      atomic_t s_bal_success; /* we found long enough chunks */
+-      atomic_t s_bal_allocated;       /* in blocks */
+-      atomic_t s_bal_ex_scanned;      /* total extents scanned */
+-      atomic_t s_bal_goals;   /* goal hits */
+-      atomic_t s_bal_breaks;  /* too long searches */
+-      atomic_t s_bal_2orders; /* 2^order hits */
++      atomic_unchecked_t s_bal_reqs;  /* number of reqs with len > 1 */
++      atomic_unchecked_t s_bal_success;       /* we found long enough chunks */
++      atomic_unchecked_t s_bal_allocated;     /* in blocks */
++      atomic_unchecked_t s_bal_ex_scanned;    /* total extents scanned */
++      atomic_unchecked_t s_bal_goals; /* goal hits */
++      atomic_unchecked_t s_bal_breaks;        /* too long searches */
++      atomic_unchecked_t s_bal_2orders;       /* 2^order hits */
+       spinlock_t s_bal_lock;
+       unsigned long s_mb_buddies_generated;
+       unsigned long long s_mb_generation_time;
+-      atomic_t s_mb_lost_chunks;
+-      atomic_t s_mb_preallocated;
+-      atomic_t s_mb_discarded;
++      atomic_unchecked_t s_mb_lost_chunks;
++      atomic_unchecked_t s_mb_preallocated;
++      atomic_unchecked_t s_mb_discarded;
+       atomic_t s_lock_busy;
+       /* locality groups */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 7f69347..7fb5e14 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -876,7 +876,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+       struct ext4_extent_header *eh;
+       struct buffer_head *bh;
+       struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
+-      short int depth, i, ppos = 0;
++      int depth, i, ppos = 0;
+       int ret;
+       eh = ext_inode_hdr(inode);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index f418f55..1c38f23 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1921,7 +1921,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+               BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+               if (EXT4_SB(sb)->s_mb_stats)
+-                      atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
++                      atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
+               break;
+       }
+@@ -2244,7 +2244,7 @@ repeat:
+                       ac->ac_status = AC_STATUS_CONTINUE;
+                       ac->ac_flags |= EXT4_MB_HINT_FIRST;
+                       cr = 3;
+-                      atomic_inc(&sbi->s_mb_lost_chunks);
++                      atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
+                       goto repeat;
+               }
+       }
+@@ -2743,25 +2743,25 @@ int ext4_mb_release(struct super_block *sb)
+       if (sbi->s_mb_stats) {
+               ext4_msg(sb, KERN_INFO,
+                      "mballoc: %u blocks %u reqs (%u success)",
+-                              atomic_read(&sbi->s_bal_allocated),
+-                              atomic_read(&sbi->s_bal_reqs),
+-                              atomic_read(&sbi->s_bal_success));
++                              atomic_read_unchecked(&sbi->s_bal_allocated),
++                              atomic_read_unchecked(&sbi->s_bal_reqs),
++                              atomic_read_unchecked(&sbi->s_bal_success));
+               ext4_msg(sb, KERN_INFO,
+                     "mballoc: %u extents scanned, %u goal hits, "
+                               "%u 2^N hits, %u breaks, %u lost",
+-                              atomic_read(&sbi->s_bal_ex_scanned),
+-                              atomic_read(&sbi->s_bal_goals),
+-                              atomic_read(&sbi->s_bal_2orders),
+-                              atomic_read(&sbi->s_bal_breaks),
+-                              atomic_read(&sbi->s_mb_lost_chunks));
++                              atomic_read_unchecked(&sbi->s_bal_ex_scanned),
++                              atomic_read_unchecked(&sbi->s_bal_goals),
++                              atomic_read_unchecked(&sbi->s_bal_2orders),
++                              atomic_read_unchecked(&sbi->s_bal_breaks),
++                              atomic_read_unchecked(&sbi->s_mb_lost_chunks));
+               ext4_msg(sb, KERN_INFO,
+                      "mballoc: %lu generated and it took %Lu",
+                               sbi->s_mb_buddies_generated,
+                               sbi->s_mb_generation_time);
+               ext4_msg(sb, KERN_INFO,
+                      "mballoc: %u preallocated, %u discarded",
+-                              atomic_read(&sbi->s_mb_preallocated),
+-                              atomic_read(&sbi->s_mb_discarded));
++                              atomic_read_unchecked(&sbi->s_mb_preallocated),
++                              atomic_read_unchecked(&sbi->s_mb_discarded));
+       }
+       free_percpu(sbi->s_locality_groups);
+@@ -3222,16 +3222,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
+       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+       if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
+-              atomic_inc(&sbi->s_bal_reqs);
+-              atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
++              atomic_inc_unchecked(&sbi->s_bal_reqs);
++              atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+               if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
+-                      atomic_inc(&sbi->s_bal_success);
+-              atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
++                      atomic_inc_unchecked(&sbi->s_bal_success);
++              atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
+               if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
+                               ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
+-                      atomic_inc(&sbi->s_bal_goals);
++                      atomic_inc_unchecked(&sbi->s_bal_goals);
+               if (ac->ac_found > sbi->s_mb_max_to_scan)
+-                      atomic_inc(&sbi->s_bal_breaks);
++                      atomic_inc_unchecked(&sbi->s_bal_breaks);
+       }
+       if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
+@@ -3658,7 +3658,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+       trace_ext4_mb_new_inode_pa(ac, pa);
+       ext4_mb_use_inode_pa(ac, pa);
+-      atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
++      atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
+       ei = EXT4_I(ac->ac_inode);
+       grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+@@ -3718,7 +3718,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+       trace_ext4_mb_new_group_pa(ac, pa);
+       ext4_mb_use_group_pa(ac, pa);
+-      atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
++      atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+       grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+       lg = ac->ac_lg;
+@@ -3807,7 +3807,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
+                * from the bitmap and continue.
+                */
+       }
+-      atomic_add(free, &sbi->s_mb_discarded);
++      atomic_add_unchecked(free, &sbi->s_mb_discarded);
+       return err;
+ }
+@@ -3825,7 +3825,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+       ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+       BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+       mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+-      atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
++      atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+       trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+       return 0;
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index cf68100..f96c5c0 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+       ext4_debug("mark blocks [%llu/%u] used\n", block, count);
+       for (count2 = count; count > 0; count -= count2, block += count2) {
+-              ext4_fsblk_t start;
++              ext4_fsblk_t start, diff;
+               struct buffer_head *bh;
+               ext4_group_t group;
+               int err;
+@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+               start = ext4_group_first_block_no(sb, group);
+               group -= flex_gd->groups[0].group;
+-              count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
+-              if (count2 > count)
+-                      count2 = count;
+-
+               if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
+                       BUG_ON(flex_gd->count > 1);
+                       continue;
+@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+               err = ext4_journal_get_write_access(handle, bh);
+               if (err)
+                       return err;
++
++              diff = block - start;
++              count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
++              if (count2 > count)
++                      count2 = count;
++
+               ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
+-                         block - start, count2);
+-              ext4_set_bits(bh->b_data, block - start, count2);
++                         diff, count2);
++              ext4_set_bits(bh->b_data, diff, count2);
+               err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               if (unlikely(err))
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3ec8708..f39299c 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -989,10 +989,12 @@ static void init_once(void *foo)
+ static int __init init_inodecache(void)
+ {
+-      ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
++      ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
+                                            sizeof(struct ext4_inode_info),
+                                            0, (SLAB_RECLAIM_ACCOUNT|
+                                               SLAB_MEM_SPREAD|SLAB_ACCOUNT),
++                                           offsetof(struct ext4_inode_info, i_data),
++                                           sizeof(((struct ext4_inode_info *)0)->i_data),
+                                            init_once);
+       if (ext4_inode_cachep == NULL)
+               return -ENOMEM;
+@@ -1387,7 +1389,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+ }
+ #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
++static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+       "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
+ #ifdef CONFIG_QUOTA
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 42145be..1f1db90 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -45,7 +45,7 @@ struct ext4_attr {
+               int offset;
+               void *explicit_ptr;
+       } u;
+-};
++} __do_const;
+ static ssize_t session_write_kbytes_show(struct ext4_attr *a,
+                                        struct ext4_sb_info *sbi, char *buf)
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 2eb935c..2fda99e 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -418,7 +418,7 @@ static int
+ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+                       char *buffer, size_t buffer_size)
+ {
+-      size_t rest = buffer_size;
++      size_t rest = buffer_size, total_size = 0;
+       for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+               const struct xattr_handler *handler =
+@@ -439,9 +439,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+                               *buffer++ = 0;
+                       }
+                       rest -= size;
++                      total_size += size;
+               }
+       }
+-      return buffer_size - rest;  /* total size */
++      return total_size;
+ }
+ static int
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 14f5fe2..ec3b8ad 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -50,7 +50,7 @@ enum {
+ };
+ struct f2fs_fault_info {
+-      atomic_t inject_ops;
++      atomic_unchecked_t inject_ops;
+       unsigned int inject_rate;
+       unsigned int inject_type;
+ };
+@@ -78,9 +78,8 @@ static inline bool time_to_inject(int type)
+       else if (type == FAULT_EVICT_INODE && !IS_FAULT_SET(type))
+               return false;
+-      atomic_inc(&f2fs_fault.inject_ops);
+-      if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
+-              atomic_set(&f2fs_fault.inject_ops, 0);
++      if (atomic_inc_return_unchecked(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
++              atomic_set_unchecked(&f2fs_fault.inject_ops, 0);
+               printk("%sF2FS-fs : inject %s in %pF\n",
+                               KERN_INFO,
+                               fault_name[type],
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 7f863a6..74c873f 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -55,7 +55,7 @@ char *fault_name[FAULT_MAX] = {
+ static void f2fs_build_fault_attr(unsigned int rate)
+ {
+       if (rate) {
+-              atomic_set(&f2fs_fault.inject_ops, 0);
++              atomic_set_unchecked(&f2fs_fault.inject_ops, 0);
+               f2fs_fault.inject_rate = rate;
+               f2fs_fault.inject_type = (1 << FAULT_MAX) - 1;
+       } else {
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 350a2c8..9fb9bf7 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -103,6 +103,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+               int force)
+ {
+       security_file_set_fowner(filp);
++      if (gr_handle_chroot_fowner(pid, type))
++              return;
++      if (gr_check_protected_task_fowner(pid, type))
++              return;
+       f_modown(filp, pid, type, force);
+ }
+ EXPORT_SYMBOL(__f_setown);
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index ca3c3dd..0c5456e 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -8,6 +8,7 @@
+ #include <linux/fs_struct.h>
+ #include <linux/fsnotify.h>
+ #include <linux/personality.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
+ #include "mount.h"
+@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
+       } else
+               retval = 0;
+       /* copy the mount id */
+-      if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
+-                       sizeof(*mnt_id)) ||
++      if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
+           copy_to_user(ufh, handle,
+                        sizeof(struct file_handle) + handle_bytes))
+               retval = -EFAULT;
+@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
+        * the directory. Ideally we would like CAP_DAC_SEARCH.
+        * But we don't have that
+        */
+-      if (!capable(CAP_DAC_READ_SEARCH)) {
++      if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
+               retval = -EPERM;
+               goto out_err;
+       }
+@@ -197,7 +197,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
+       /* copy the full handle */
+       *handle = f_handle;
+       if (copy_from_user(&handle->f_handle,
+-                         &ufh->f_handle,
++                         ufh->f_handle,
+                          f_handle.handle_bytes)) {
+               retval = -EFAULT;
+               goto out_handle;
+diff --git a/fs/file.c b/fs/file.c
+index 6b1acdf..b908eba 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/fdtable.h>
+ #include <linux/bitops.h>
+ #include <linux/interrupt.h>
+@@ -163,9 +164,10 @@ out:
+  * Return <0 error code on error; 1 on successful completion.
+  * The files->file_lock should be held on entry, and will be held on exit.
+  */
+-static int expand_fdtable(struct files_struct *files, int nr)
+-      __releases(files->file_lock)
+-      __acquires(files->file_lock)
++static int expand_fdtable(struct files_struct *files, unsigned int nr)
++      __releases(&files->file_lock)
++      __acquires(&files->file_lock);
++static int expand_fdtable(struct files_struct *files, unsigned int nr)
+ {
+       struct fdtable *new_fdt, *cur_fdt;
+@@ -208,9 +210,10 @@ static int expand_fdtable(struct files_struct *files, int nr)
+  * expanded and execution may have blocked.
+  * The files->file_lock should be held on entry, and will be held on exit.
+  */
+-static int expand_files(struct files_struct *files, int nr)
+-      __releases(files->file_lock)
+-      __acquires(files->file_lock)
++static int expand_files(struct files_struct *files, unsigned int nr)
++      __releases(&files->file_lock)
++      __acquires(&files->file_lock);
++static int expand_files(struct files_struct *files, unsigned int nr)
+ {
+       struct fdtable *fdt;
+       int expanded = 0;
+@@ -822,7 +825,9 @@ bool get_close_on_exec(unsigned int fd)
+ static int do_dup2(struct files_struct *files,
+       struct file *file, unsigned fd, unsigned flags)
+-__releases(&files->file_lock)
++__releases(&files->file_lock);
++static int do_dup2(struct files_struct *files,
++      struct file *file, unsigned fd, unsigned flags)
+ {
+       struct file *tofree;
+       struct fdtable *fdt;
+@@ -872,6 +877,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
+       if (!file)
+               return __close_fd(files, fd);
++      gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
+       if (fd >= rlimit(RLIMIT_NOFILE))
+               return -EBADF;
+@@ -898,6 +904,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
+       if (unlikely(oldfd == newfd))
+               return -EINVAL;
++      gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
+       if (newfd >= rlimit(RLIMIT_NOFILE))
+               return -EBADF;
+@@ -953,6 +960,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
+ int f_dupfd(unsigned int from, struct file *file, unsigned flags)
+ {
+       int err;
++      gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
+       if (from >= rlimit(RLIMIT_NOFILE))
+               return -EINVAL;
+       err = alloc_fd(from, flags);
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index c5618db..50c38f4 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
+       int len = dot ? dot - name : strlen(name);
+       fs = __get_fs_type(name, len);
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
++#else
+       if (!fs && (request_module("fs-%.*s", len, name) == 0))
++#endif
+               fs = __get_fs_type(name, len);
+       if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
+diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
+index 455ce5b..ec65e7e 100644
+--- a/fs/freevxfs/vxfs_super.c
++++ b/fs/freevxfs/vxfs_super.c
+@@ -332,9 +332,11 @@ vxfs_init(void)
+ {
+       int rv;
+-      vxfs_inode_cachep = kmem_cache_create("vxfs_inode",
++      vxfs_inode_cachep = kmem_cache_create_usercopy("vxfs_inode",
+                       sizeof(struct vxfs_inode_info), 0,
+-                      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
++                      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
++                      offsetof(struct vxfs_inode_info, vii_immed.vi_immed),
++                      sizeof(((struct vxfs_inode_info *)0)->vii_immed.vi_immed),  NULL);
+       if (!vxfs_inode_cachep)
+               return -ENOMEM;
+       rv = register_filesystem(&vxfs_fs_type);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 05713a5..6cfd433 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -880,9 +880,9 @@ fs_initcall(cgroup_writeback_init);
+ #else /* CONFIG_CGROUP_WRITEBACK */
+ static struct bdi_writeback *
++locked_inode_to_wb_and_lock_list(struct inode *inode) __releases(&inode->i_lock) __acquires(&wb->list_lock);
++static struct bdi_writeback *
+ locked_inode_to_wb_and_lock_list(struct inode *inode)
+-      __releases(&inode->i_lock)
+-      __acquires(&wb->list_lock)
+ {
+       struct bdi_writeback *wb = inode_to_wb(inode);
+@@ -891,8 +891,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
+       return wb;
+ }
++static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) __acquires(&wb->list_lock);
+ static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
+-      __acquires(&wb->list_lock)
+ {
+       struct bdi_writeback *wb = inode_to_wb(inode);
+@@ -1173,9 +1173,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
+  * Wait for writeback on an inode to complete. Called with i_lock held.
+  * Caller must make sure inode cannot go away when we drop i_lock.
+  */
++static void __inode_wait_for_writeback(struct inode *inode) __must_hold(&inode->i_lock);
+ static void __inode_wait_for_writeback(struct inode *inode)
+-      __releases(inode->i_lock)
+-      __acquires(inode->i_lock)
+ {
+       DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
+       wait_queue_head_t *wqh;
+@@ -1204,8 +1203,8 @@ void inode_wait_for_writeback(struct inode *inode)
+  * held and drops it. It is aimed for callers not holding any inode reference
+  * so once i_lock is dropped, inode can go away.
+  */
++static void inode_sleep_on_writeback(struct inode *inode) __releases(&inode->i_lock);
+ static void inode_sleep_on_writeback(struct inode *inode)
+-      __releases(inode->i_lock)
+ {
+       DEFINE_WAIT(wait);
+       wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+diff --git a/fs/fs_struct.c b/fs/fs_struct.c
+index 7dca743..1ff87ae 100644
+--- a/fs/fs_struct.c
++++ b/fs/fs_struct.c
+@@ -4,6 +4,7 @@
+ #include <linux/path.h>
+ #include <linux/slab.h>
+ #include <linux/fs_struct.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
+ /*
+@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
+       struct path old_root;
+       path_get(path);
++      gr_inc_chroot_refcnts(path->dentry, path->mnt);
+       spin_lock(&fs->lock);
+       write_seqcount_begin(&fs->seq);
+       old_root = fs->root;
+       fs->root = *path;
++      gr_set_chroot_entries(current, path);
+       write_seqcount_end(&fs->seq);
+       spin_unlock(&fs->lock);
+-      if (old_root.dentry)
++      if (old_root.dentry) {
++              gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
+               path_put(&old_root);
++      }
+ }
+ /*
+@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
+                       int hits = 0;
+                       spin_lock(&fs->lock);
+                       write_seqcount_begin(&fs->seq);
++                      /* this root replacement is only done by pivot_root,
++                         leave grsec's chroot tagging alone for this task
++                         so that a pivoted root isn't treated as a chroot
++                      */
+                       hits += replace_path(&fs->root, old_root, new_root);
+                       hits += replace_path(&fs->pwd, old_root, new_root);
+                       write_seqcount_end(&fs->seq);
+@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
+ void free_fs_struct(struct fs_struct *fs)
+ {
++      gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
+       path_put(&fs->root);
+       path_put(&fs->pwd);
+       kmem_cache_free(fs_cachep, fs);
+@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
+               task_lock(tsk);
+               spin_lock(&fs->lock);
+               tsk->fs = NULL;
+-              kill = !--fs->users;
++              gr_clear_chroot_entries(tsk);
++              kill = !atomic_dec_return(&fs->users);
+               spin_unlock(&fs->lock);
+               task_unlock(tsk);
+               if (kill)
+@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
+       struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
+       /* We don't need to lock fs - think why ;-) */
+       if (fs) {
+-              fs->users = 1;
++              atomic_set(&fs->users, 1);
+               fs->in_exec = 0;
+               spin_lock_init(&fs->lock);
+               seqcount_init(&fs->seq);
+@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
+               spin_lock(&old->lock);
+               fs->root = old->root;
+               path_get(&fs->root);
++              /* instead of calling gr_set_chroot_entries here,
++                 we call it from every caller of this function
++              */
+               fs->pwd = old->pwd;
+               path_get(&fs->pwd);
+               spin_unlock(&old->lock);
++              gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
+       }
+       return fs;
+ }
+@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
+       task_lock(current);
+       spin_lock(&fs->lock);
+-      kill = !--fs->users;
++      kill = !atomic_dec_return(&fs->users);
+       current->fs = new_fs;
++      gr_set_chroot_entries(current, &new_fs->root);
+       spin_unlock(&fs->lock);
+       task_unlock(current);
+@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
+ int current_umask(void)
+ {
+-      return current->fs->umask;
++      return current->fs->umask | gr_acl_umask();
+ }
+ EXPORT_SYMBOL(current_umask);
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+-      .users          = 1,
++      .users          = ATOMIC_INIT(1),
+       .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+       .seq            = SEQCNT_ZERO(init_fs.seq),
+       .umask          = 0022,
+diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
+index 43040721..2780191 100644
+--- a/fs/fscache/cookie.c
++++ b/fs/fscache/cookie.c
+@@ -19,7 +19,7 @@
+ struct kmem_cache *fscache_cookie_jar;
+-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
++static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
+ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
+ static int fscache_alloc_object(struct fscache_cache *cache,
+@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
+              parent ? (char *) parent->def->name : "<no-parent>",
+              def->name, netfs_data, enable);
+-      fscache_stat(&fscache_n_acquires);
++      fscache_stat_unchecked(&fscache_n_acquires);
+       /* if there's no parent cookie, then we don't create one here either */
+       if (!parent) {
+-              fscache_stat(&fscache_n_acquires_null);
++              fscache_stat_unchecked(&fscache_n_acquires_null);
+               _leave(" [no parent]");
+               return NULL;
+       }
+@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
+       /* allocate and initialise a cookie */
+       cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+       if (!cookie) {
+-              fscache_stat(&fscache_n_acquires_oom);
++              fscache_stat_unchecked(&fscache_n_acquires_oom);
+               _leave(" [ENOMEM]");
+               return NULL;
+       }
+@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
+       switch (cookie->def->type) {
+       case FSCACHE_COOKIE_TYPE_INDEX:
+-              fscache_stat(&fscache_n_cookie_index);
++              fscache_stat_unchecked(&fscache_n_cookie_index);
+               break;
+       case FSCACHE_COOKIE_TYPE_DATAFILE:
+-              fscache_stat(&fscache_n_cookie_data);
++              fscache_stat_unchecked(&fscache_n_cookie_data);
+               break;
+       default:
+-              fscache_stat(&fscache_n_cookie_special);
++              fscache_stat_unchecked(&fscache_n_cookie_special);
+               break;
+       }
+@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
+                       } else {
+                               atomic_dec(&parent->n_children);
+                               __fscache_cookie_put(cookie);
+-                              fscache_stat(&fscache_n_acquires_nobufs);
++                              fscache_stat_unchecked(&fscache_n_acquires_nobufs);
+                               _leave(" = NULL");
+                               return NULL;
+                       }
+@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
+               }
+       }
+-      fscache_stat(&fscache_n_acquires_ok);
++      fscache_stat_unchecked(&fscache_n_acquires_ok);
+       _leave(" = %p", cookie);
+       return cookie;
+ }
+@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
+       cache = fscache_select_cache_for_object(cookie->parent);
+       if (!cache) {
+               up_read(&fscache_addremove_sem);
+-              fscache_stat(&fscache_n_acquires_no_cache);
++              fscache_stat_unchecked(&fscache_n_acquires_no_cache);
+               _leave(" = -ENOMEDIUM [no cache]");
+               return -ENOMEDIUM;
+       }
+@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
+       object = cache->ops->alloc_object(cache, cookie);
+       fscache_stat_d(&fscache_n_cop_alloc_object);
+       if (IS_ERR(object)) {
+-              fscache_stat(&fscache_n_object_no_alloc);
++              fscache_stat_unchecked(&fscache_n_object_no_alloc);
+               ret = PTR_ERR(object);
+               goto error;
+       }
+-      fscache_stat(&fscache_n_object_alloc);
++      fscache_stat_unchecked(&fscache_n_object_alloc);
+-      object->debug_id = atomic_inc_return(&fscache_object_debug_id);
++      object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
+       _debug("ALLOC OBJ%x: %s {%lx}",
+              object->debug_id, cookie->def->name, object->events);
+@@ -419,7 +419,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
+       _enter("{%s}", cookie->def->name);
+-      fscache_stat(&fscache_n_invalidates);
++      fscache_stat_unchecked(&fscache_n_invalidates);
+       /* Only permit invalidation of data files.  Invalidating an index will
+        * require the caller to release all its attachments to the tree rooted
+@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
+ {
+       struct fscache_object *object;
+-      fscache_stat(&fscache_n_updates);
++      fscache_stat_unchecked(&fscache_n_updates);
+       if (!cookie) {
+-              fscache_stat(&fscache_n_updates_null);
++              fscache_stat_unchecked(&fscache_n_updates_null);
+               _leave(" [no cookie]");
+               return;
+       }
+@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
+  */
+ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+ {
+-      fscache_stat(&fscache_n_relinquishes);
++      fscache_stat_unchecked(&fscache_n_relinquishes);
+       if (retire)
+-              fscache_stat(&fscache_n_relinquishes_retire);
++              fscache_stat_unchecked(&fscache_n_relinquishes_retire);
+       if (!cookie) {
+-              fscache_stat(&fscache_n_relinquishes_null);
++              fscache_stat_unchecked(&fscache_n_relinquishes_null);
+               _leave(" [no cookie]");
+               return;
+       }
+@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
+       if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
+               goto inconsistent;
+-      op->debug_id = atomic_inc_return(&fscache_op_debug_id);
++      op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+       __fscache_use_cookie(cookie);
+       if (fscache_submit_op(object, op) < 0)
+diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
+index 97ec451..f722cee 100644
+--- a/fs/fscache/internal.h
++++ b/fs/fscache/internal.h
+@@ -136,8 +136,8 @@ extern void fscache_operation_gc(struct work_struct *);
+ extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
+ extern int fscache_wait_for_operation_activation(struct fscache_object *,
+                                                struct fscache_operation *,
+-                                               atomic_t *,
+-                                               atomic_t *);
++                                               atomic_unchecked_t *,
++                                               atomic_unchecked_t *);
+ extern void fscache_invalidate_writes(struct fscache_cookie *);
+ /*
+@@ -155,102 +155,102 @@ extern void fscache_proc_cleanup(void);
+  * stats.c
+  */
+ #ifdef CONFIG_FSCACHE_STATS
+-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
++extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
++extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+-extern atomic_t fscache_n_op_pend;
+-extern atomic_t fscache_n_op_run;
+-extern atomic_t fscache_n_op_enqueue;
+-extern atomic_t fscache_n_op_deferred_release;
+-extern atomic_t fscache_n_op_initialised;
+-extern atomic_t fscache_n_op_release;
+-extern atomic_t fscache_n_op_gc;
+-extern atomic_t fscache_n_op_cancelled;
+-extern atomic_t fscache_n_op_rejected;
++extern atomic_unchecked_t fscache_n_op_pend;
++extern atomic_unchecked_t fscache_n_op_run;
++extern atomic_unchecked_t fscache_n_op_enqueue;
++extern atomic_unchecked_t fscache_n_op_deferred_release;
++extern atomic_unchecked_t fscache_n_op_initialised;
++extern atomic_unchecked_t fscache_n_op_release;
++extern atomic_unchecked_t fscache_n_op_gc;
++extern atomic_unchecked_t fscache_n_op_cancelled;
++extern atomic_unchecked_t fscache_n_op_rejected;
+-extern atomic_t fscache_n_attr_changed;
+-extern atomic_t fscache_n_attr_changed_ok;
+-extern atomic_t fscache_n_attr_changed_nobufs;
+-extern atomic_t fscache_n_attr_changed_nomem;
+-extern atomic_t fscache_n_attr_changed_calls;
++extern atomic_unchecked_t fscache_n_attr_changed;
++extern atomic_unchecked_t fscache_n_attr_changed_ok;
++extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
++extern atomic_unchecked_t fscache_n_attr_changed_nomem;
++extern atomic_unchecked_t fscache_n_attr_changed_calls;
+-extern atomic_t fscache_n_allocs;
+-extern atomic_t fscache_n_allocs_ok;
+-extern atomic_t fscache_n_allocs_wait;
+-extern atomic_t fscache_n_allocs_nobufs;
+-extern atomic_t fscache_n_allocs_intr;
+-extern atomic_t fscache_n_allocs_object_dead;
+-extern atomic_t fscache_n_alloc_ops;
+-extern atomic_t fscache_n_alloc_op_waits;
++extern atomic_unchecked_t fscache_n_allocs;
++extern atomic_unchecked_t fscache_n_allocs_ok;
++extern atomic_unchecked_t fscache_n_allocs_wait;
++extern atomic_unchecked_t fscache_n_allocs_nobufs;
++extern atomic_unchecked_t fscache_n_allocs_intr;
++extern atomic_unchecked_t fscache_n_allocs_object_dead;
++extern atomic_unchecked_t fscache_n_alloc_ops;
++extern atomic_unchecked_t fscache_n_alloc_op_waits;
+-extern atomic_t fscache_n_retrievals;
+-extern atomic_t fscache_n_retrievals_ok;
+-extern atomic_t fscache_n_retrievals_wait;
+-extern atomic_t fscache_n_retrievals_nodata;
+-extern atomic_t fscache_n_retrievals_nobufs;
+-extern atomic_t fscache_n_retrievals_intr;
+-extern atomic_t fscache_n_retrievals_nomem;
+-extern atomic_t fscache_n_retrievals_object_dead;
+-extern atomic_t fscache_n_retrieval_ops;
+-extern atomic_t fscache_n_retrieval_op_waits;
++extern atomic_unchecked_t fscache_n_retrievals;
++extern atomic_unchecked_t fscache_n_retrievals_ok;
++extern atomic_unchecked_t fscache_n_retrievals_wait;
++extern atomic_unchecked_t fscache_n_retrievals_nodata;
++extern atomic_unchecked_t fscache_n_retrievals_nobufs;
++extern atomic_unchecked_t fscache_n_retrievals_intr;
++extern atomic_unchecked_t fscache_n_retrievals_nomem;
++extern atomic_unchecked_t fscache_n_retrievals_object_dead;
++extern atomic_unchecked_t fscache_n_retrieval_ops;
++extern atomic_unchecked_t fscache_n_retrieval_op_waits;
+-extern atomic_t fscache_n_stores;
+-extern atomic_t fscache_n_stores_ok;
+-extern atomic_t fscache_n_stores_again;
+-extern atomic_t fscache_n_stores_nobufs;
+-extern atomic_t fscache_n_stores_oom;
+-extern atomic_t fscache_n_store_ops;
+-extern atomic_t fscache_n_store_calls;
+-extern atomic_t fscache_n_store_pages;
+-extern atomic_t fscache_n_store_radix_deletes;
+-extern atomic_t fscache_n_store_pages_over_limit;
++extern atomic_unchecked_t fscache_n_stores;
++extern atomic_unchecked_t fscache_n_stores_ok;
++extern atomic_unchecked_t fscache_n_stores_again;
++extern atomic_unchecked_t fscache_n_stores_nobufs;
++extern atomic_unchecked_t fscache_n_stores_oom;
++extern atomic_unchecked_t fscache_n_store_ops;
++extern atomic_unchecked_t fscache_n_store_calls;
++extern atomic_unchecked_t fscache_n_store_pages;
++extern atomic_unchecked_t fscache_n_store_radix_deletes;
++extern atomic_unchecked_t fscache_n_store_pages_over_limit;
+-extern atomic_t fscache_n_store_vmscan_not_storing;
+-extern atomic_t fscache_n_store_vmscan_gone;
+-extern atomic_t fscache_n_store_vmscan_busy;
+-extern atomic_t fscache_n_store_vmscan_cancelled;
+-extern atomic_t fscache_n_store_vmscan_wait;
++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++extern atomic_unchecked_t fscache_n_store_vmscan_gone;
++extern atomic_unchecked_t fscache_n_store_vmscan_busy;
++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++extern atomic_unchecked_t fscache_n_store_vmscan_wait;
+-extern atomic_t fscache_n_marks;
+-extern atomic_t fscache_n_uncaches;
++extern atomic_unchecked_t fscache_n_marks;
++extern atomic_unchecked_t fscache_n_uncaches;
+-extern atomic_t fscache_n_acquires;
+-extern atomic_t fscache_n_acquires_null;
+-extern atomic_t fscache_n_acquires_no_cache;
+-extern atomic_t fscache_n_acquires_ok;
+-extern atomic_t fscache_n_acquires_nobufs;
+-extern atomic_t fscache_n_acquires_oom;
++extern atomic_unchecked_t fscache_n_acquires;
++extern atomic_unchecked_t fscache_n_acquires_null;
++extern atomic_unchecked_t fscache_n_acquires_no_cache;
++extern atomic_unchecked_t fscache_n_acquires_ok;
++extern atomic_unchecked_t fscache_n_acquires_nobufs;
++extern atomic_unchecked_t fscache_n_acquires_oom;
+-extern atomic_t fscache_n_invalidates;
+-extern atomic_t fscache_n_invalidates_run;
++extern atomic_unchecked_t fscache_n_invalidates;
++extern atomic_unchecked_t fscache_n_invalidates_run;
+-extern atomic_t fscache_n_updates;
+-extern atomic_t fscache_n_updates_null;
+-extern atomic_t fscache_n_updates_run;
++extern atomic_unchecked_t fscache_n_updates;
++extern atomic_unchecked_t fscache_n_updates_null;
++extern atomic_unchecked_t fscache_n_updates_run;
+-extern atomic_t fscache_n_relinquishes;
+-extern atomic_t fscache_n_relinquishes_null;
+-extern atomic_t fscache_n_relinquishes_waitcrt;
+-extern atomic_t fscache_n_relinquishes_retire;
++extern atomic_unchecked_t fscache_n_relinquishes;
++extern atomic_unchecked_t fscache_n_relinquishes_null;
++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++extern atomic_unchecked_t fscache_n_relinquishes_retire;
+-extern atomic_t fscache_n_cookie_index;
+-extern atomic_t fscache_n_cookie_data;
+-extern atomic_t fscache_n_cookie_special;
++extern atomic_unchecked_t fscache_n_cookie_index;
++extern atomic_unchecked_t fscache_n_cookie_data;
++extern atomic_unchecked_t fscache_n_cookie_special;
+-extern atomic_t fscache_n_object_alloc;
+-extern atomic_t fscache_n_object_no_alloc;
+-extern atomic_t fscache_n_object_lookups;
+-extern atomic_t fscache_n_object_lookups_negative;
+-extern atomic_t fscache_n_object_lookups_positive;
+-extern atomic_t fscache_n_object_lookups_timed_out;
+-extern atomic_t fscache_n_object_created;
+-extern atomic_t fscache_n_object_avail;
+-extern atomic_t fscache_n_object_dead;
++extern atomic_unchecked_t fscache_n_object_alloc;
++extern atomic_unchecked_t fscache_n_object_no_alloc;
++extern atomic_unchecked_t fscache_n_object_lookups;
++extern atomic_unchecked_t fscache_n_object_lookups_negative;
++extern atomic_unchecked_t fscache_n_object_lookups_positive;
++extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
++extern atomic_unchecked_t fscache_n_object_created;
++extern atomic_unchecked_t fscache_n_object_avail;
++extern atomic_unchecked_t fscache_n_object_dead;
+-extern atomic_t fscache_n_checkaux_none;
+-extern atomic_t fscache_n_checkaux_okay;
+-extern atomic_t fscache_n_checkaux_update;
+-extern atomic_t fscache_n_checkaux_obsolete;
++extern atomic_unchecked_t fscache_n_checkaux_none;
++extern atomic_unchecked_t fscache_n_checkaux_okay;
++extern atomic_unchecked_t fscache_n_checkaux_update;
++extern atomic_unchecked_t fscache_n_checkaux_obsolete;
+ extern atomic_t fscache_n_cop_alloc_object;
+ extern atomic_t fscache_n_cop_lookup_object;
+@@ -280,6 +280,11 @@ static inline void fscache_stat(atomic_t *stat)
+       atomic_inc(stat);
+ }
++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
++{
++      atomic_inc_unchecked(stat);
++}
++
+ static inline void fscache_stat_d(atomic_t *stat)
+ {
+       atomic_dec(stat);
+@@ -292,6 +297,7 @@ extern const struct file_operations fscache_stats_fops;
+ #define __fscache_stat(stat) (NULL)
+ #define fscache_stat(stat) do {} while (0)
++#define fscache_stat_unchecked(stat) do {} while (0)
+ #define fscache_stat_d(stat) do {} while (0)
+ #endif
+diff --git a/fs/fscache/object.c b/fs/fscache/object.c
+index 9e792e3..6b2affb 100644
+--- a/fs/fscache/object.c
++++ b/fs/fscache/object.c
+@@ -465,7 +465,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
+       _debug("LOOKUP \"%s\" in \"%s\"",
+              cookie->def->name, object->cache->tag->name);
+-      fscache_stat(&fscache_n_object_lookups);
++      fscache_stat_unchecked(&fscache_n_object_lookups);
+       fscache_stat(&fscache_n_cop_lookup_object);
+       ret = object->cache->ops->lookup_object(object);
+       fscache_stat_d(&fscache_n_cop_lookup_object);
+@@ -475,7 +475,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
+       if (ret == -ETIMEDOUT) {
+               /* probably stuck behind another object, so move this one to
+                * the back of the queue */
+-              fscache_stat(&fscache_n_object_lookups_timed_out);
++              fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
+               _leave(" [timeout]");
+               return NO_TRANSIT;
+       }
+@@ -503,7 +503,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
+       _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
+       if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
+-              fscache_stat(&fscache_n_object_lookups_negative);
++              fscache_stat_unchecked(&fscache_n_object_lookups_negative);
+               /* Allow write requests to begin stacking up and read requests to begin
+                * returning ENODATA.
+@@ -538,7 +538,7 @@ void fscache_obtained_object(struct fscache_object *object)
+       /* if we were still looking up, then we must have a positive lookup
+        * result, in which case there may be data available */
+       if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
+-              fscache_stat(&fscache_n_object_lookups_positive);
++              fscache_stat_unchecked(&fscache_n_object_lookups_positive);
+               /* We do (presumably) have data */
+               clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+@@ -550,7 +550,7 @@ void fscache_obtained_object(struct fscache_object *object)
+               clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+       } else {
+-              fscache_stat(&fscache_n_object_created);
++              fscache_stat_unchecked(&fscache_n_object_created);
+       }
+       set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
+@@ -586,7 +586,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
+       fscache_stat_d(&fscache_n_cop_lookup_complete);
+       fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
+-      fscache_stat(&fscache_n_object_avail);
++      fscache_stat_unchecked(&fscache_n_object_avail);
+       _leave("");
+       return transit_to(JUMPSTART_DEPS);
+@@ -735,7 +735,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
+       /* this just shifts the object release to the work processor */
+       fscache_put_object(object);
+-      fscache_stat(&fscache_n_object_dead);
++      fscache_stat_unchecked(&fscache_n_object_dead);
+       _leave("");
+       return transit_to(OBJECT_DEAD);
+@@ -900,7 +900,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+       enum fscache_checkaux result;
+       if (!object->cookie->def->check_aux) {
+-              fscache_stat(&fscache_n_checkaux_none);
++              fscache_stat_unchecked(&fscache_n_checkaux_none);
+               return FSCACHE_CHECKAUX_OKAY;
+       }
+@@ -909,17 +909,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+       switch (result) {
+               /* entry okay as is */
+       case FSCACHE_CHECKAUX_OKAY:
+-              fscache_stat(&fscache_n_checkaux_okay);
++              fscache_stat_unchecked(&fscache_n_checkaux_okay);
+               break;
+               /* entry requires update */
+       case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+-              fscache_stat(&fscache_n_checkaux_update);
++              fscache_stat_unchecked(&fscache_n_checkaux_update);
+               break;
+               /* entry requires deletion */
+       case FSCACHE_CHECKAUX_OBSOLETE:
+-              fscache_stat(&fscache_n_checkaux_obsolete);
++              fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
+               break;
+       default:
+@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
+ {
+       const struct fscache_state *s;
+-      fscache_stat(&fscache_n_invalidates_run);
++      fscache_stat_unchecked(&fscache_n_invalidates_run);
+       fscache_stat(&fscache_n_cop_invalidate_object);
+       s = _fscache_invalidate_object(object, event);
+       fscache_stat_d(&fscache_n_cop_invalidate_object);
+@@ -1022,7 +1022,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
+ {
+       _enter("{OBJ%x},%d", object->debug_id, event);
+-      fscache_stat(&fscache_n_updates_run);
++      fscache_stat_unchecked(&fscache_n_updates_run);
+       fscache_stat(&fscache_n_cop_update_object);
+       object->cache->ops->update_object(object);
+       fscache_stat_d(&fscache_n_cop_update_object);
+diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
+index de67745..6a3a9b6 100644
+--- a/fs/fscache/operation.c
++++ b/fs/fscache/operation.c
+@@ -17,7 +17,7 @@
+ #include <linux/slab.h>
+ #include "internal.h"
+-atomic_t fscache_op_debug_id;
++atomic_unchecked_t fscache_op_debug_id;
+ EXPORT_SYMBOL(fscache_op_debug_id);
+ static void fscache_operation_dummy_cancel(struct fscache_operation *op)
+@@ -40,12 +40,12 @@ void fscache_operation_init(struct fscache_operation *op,
+       INIT_WORK(&op->work, fscache_op_work_func);
+       atomic_set(&op->usage, 1);
+       op->state = FSCACHE_OP_ST_INITIALISED;
+-      op->debug_id = atomic_inc_return(&fscache_op_debug_id);
++      op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+       op->processor = processor;
+       op->cancel = cancel ?: fscache_operation_dummy_cancel;
+       op->release = release;
+       INIT_LIST_HEAD(&op->pend_link);
+-      fscache_stat(&fscache_n_op_initialised);
++      fscache_stat_unchecked(&fscache_n_op_initialised);
+ }
+ EXPORT_SYMBOL(fscache_operation_init);
+@@ -68,7 +68,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
+       ASSERTCMP(atomic_read(&op->usage), >, 0);
+       ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+-      fscache_stat(&fscache_n_op_enqueue);
++      fscache_stat_unchecked(&fscache_n_op_enqueue);
+       switch (op->flags & FSCACHE_OP_TYPE) {
+       case FSCACHE_OP_ASYNC:
+               _debug("queue async");
+@@ -101,7 +101,7 @@ static void fscache_run_op(struct fscache_object *object,
+               wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+       if (op->processor)
+               fscache_enqueue_operation(op);
+-      fscache_stat(&fscache_n_op_run);
++      fscache_stat_unchecked(&fscache_n_op_run);
+ }
+ /*
+@@ -169,7 +169,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+       op->state = FSCACHE_OP_ST_PENDING;
+       flags = READ_ONCE(object->flags);
+       if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
+-              fscache_stat(&fscache_n_op_rejected);
++              fscache_stat_unchecked(&fscache_n_op_rejected);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
+@@ -185,11 +185,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+               if (object->n_in_progress > 0) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+               } else if (!list_empty(&object->pending_ops)) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+                       fscache_start_operations(object);
+               } else {
+                       ASSERTCMP(object->n_in_progress, ==, 0);
+@@ -205,7 +205,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+               object->n_exclusive++;  /* reads and writes must wait */
+               atomic_inc(&op->usage);
+               list_add_tail(&op->pend_link, &object->pending_ops);
+-              fscache_stat(&fscache_n_op_pend);
++              fscache_stat_unchecked(&fscache_n_op_pend);
+               ret = 0;
+       } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+               op->cancel(op);
+@@ -254,7 +254,7 @@ int fscache_submit_op(struct fscache_object *object,
+       op->state = FSCACHE_OP_ST_PENDING;
+       flags = READ_ONCE(object->flags);
+       if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
+-              fscache_stat(&fscache_n_op_rejected);
++              fscache_stat_unchecked(&fscache_n_op_rejected);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
+@@ -269,11 +269,11 @@ int fscache_submit_op(struct fscache_object *object,
+               if (object->n_exclusive > 0) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+               } else if (!list_empty(&object->pending_ops)) {
+                       atomic_inc(&op->usage);
+                       list_add_tail(&op->pend_link, &object->pending_ops);
+-                      fscache_stat(&fscache_n_op_pend);
++                      fscache_stat_unchecked(&fscache_n_op_pend);
+                       fscache_start_operations(object);
+               } else {
+                       ASSERTCMP(object->n_exclusive, ==, 0);
+@@ -285,7 +285,7 @@ int fscache_submit_op(struct fscache_object *object,
+               object->n_ops++;
+               atomic_inc(&op->usage);
+               list_add_tail(&op->pend_link, &object->pending_ops);
+-              fscache_stat(&fscache_n_op_pend);
++              fscache_stat_unchecked(&fscache_n_op_pend);
+               ret = 0;
+       } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+               op->cancel(op);
+@@ -369,7 +369,7 @@ int fscache_cancel_op(struct fscache_operation *op,
+               list_del_init(&op->pend_link);
+               put = true;
+-              fscache_stat(&fscache_n_op_cancelled);
++              fscache_stat_unchecked(&fscache_n_op_cancelled);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+@@ -385,7 +385,7 @@ int fscache_cancel_op(struct fscache_operation *op,
+               if (object->n_in_progress == 0)
+                       fscache_start_operations(object);
+-              fscache_stat(&fscache_n_op_cancelled);
++              fscache_stat_unchecked(&fscache_n_op_cancelled);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+@@ -416,7 +416,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
+       while (!list_empty(&object->pending_ops)) {
+               op = list_entry(object->pending_ops.next,
+                               struct fscache_operation, pend_link);
+-              fscache_stat(&fscache_n_op_cancelled);
++              fscache_stat_unchecked(&fscache_n_op_cancelled);
+               list_del_init(&op->pend_link);
+               ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+@@ -493,7 +493,7 @@ void fscache_put_operation(struct fscache_operation *op)
+                   op->state != FSCACHE_OP_ST_COMPLETE,
+                   op->state, ==, FSCACHE_OP_ST_CANCELLED);
+-      fscache_stat(&fscache_n_op_release);
++      fscache_stat_unchecked(&fscache_n_op_release);
+       if (op->release) {
+               op->release(op);
+@@ -513,7 +513,7 @@ void fscache_put_operation(struct fscache_operation *op)
+                * lock, and defer it otherwise */
+               if (!spin_trylock(&object->lock)) {
+                       _debug("defer put");
+-                      fscache_stat(&fscache_n_op_deferred_release);
++                      fscache_stat_unchecked(&fscache_n_op_deferred_release);
+                       cache = object->cache;
+                       spin_lock(&cache->op_gc_list_lock);
+@@ -567,7 +567,7 @@ void fscache_operation_gc(struct work_struct *work)
+               _debug("GC DEFERRED REL OBJ%x OP%x",
+                      object->debug_id, op->debug_id);
+-              fscache_stat(&fscache_n_op_gc);
++              fscache_stat_unchecked(&fscache_n_op_gc);
+               ASSERTCMP(atomic_read(&op->usage), ==, 0);
+               ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
+diff --git a/fs/fscache/page.c b/fs/fscache/page.c
+index c8c4f79..0512aeb 100644
+--- a/fs/fscache/page.c
++++ b/fs/fscache/page.c
+@@ -74,7 +74,7 @@ try_again:
+       val = radix_tree_lookup(&cookie->stores, page->index);
+       if (!val) {
+               rcu_read_unlock();
+-              fscache_stat(&fscache_n_store_vmscan_not_storing);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
+               __fscache_uncache_page(cookie, page);
+               return true;
+       }
+@@ -104,11 +104,11 @@ try_again:
+       spin_unlock(&cookie->stores_lock);
+       if (xpage) {
+-              fscache_stat(&fscache_n_store_vmscan_cancelled);
+-              fscache_stat(&fscache_n_store_radix_deletes);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
++              fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+               ASSERTCMP(xpage, ==, page);
+       } else {
+-              fscache_stat(&fscache_n_store_vmscan_gone);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
+       }
+       wake_up_bit(&cookie->flags, 0);
+@@ -123,11 +123,11 @@ page_busy:
+        * sleeping on memory allocation, so we may need to impose a timeout
+        * too. */
+       if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
+-              fscache_stat(&fscache_n_store_vmscan_busy);
++              fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
+               return false;
+       }
+-      fscache_stat(&fscache_n_store_vmscan_wait);
++      fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
+       if (!release_page_wait_timeout(cookie, page))
+               _debug("fscache writeout timeout page: %p{%lx}",
+                       page, page->index);
+@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
+                                    FSCACHE_COOKIE_STORING_TAG);
+               if (!radix_tree_tag_get(&cookie->stores, page->index,
+                                       FSCACHE_COOKIE_PENDING_TAG)) {
+-                      fscache_stat(&fscache_n_store_radix_deletes);
++                      fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+                       xpage = radix_tree_delete(&cookie->stores, page->index);
+               }
+               spin_unlock(&cookie->stores_lock);
+@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
+       _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
+-      fscache_stat(&fscache_n_attr_changed_calls);
++      fscache_stat_unchecked(&fscache_n_attr_changed_calls);
+       if (fscache_object_is_active(object)) {
+               fscache_stat(&fscache_n_cop_attr_changed);
+@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+       ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+-      fscache_stat(&fscache_n_attr_changed);
++      fscache_stat_unchecked(&fscache_n_attr_changed);
+       op = kzalloc(sizeof(*op), GFP_KERNEL);
+       if (!op) {
+-              fscache_stat(&fscache_n_attr_changed_nomem);
++              fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
+               _leave(" = -ENOMEM");
+               return -ENOMEM;
+       }
+@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+       if (fscache_submit_exclusive_op(object, op) < 0)
+               goto nobufs_dec;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_attr_changed_ok);
++      fscache_stat_unchecked(&fscache_n_attr_changed_ok);
+       fscache_put_operation(op);
+       _leave(" = 0");
+       return 0;
+@@ -242,7 +242,7 @@ nobufs:
+       fscache_put_operation(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
+-      fscache_stat(&fscache_n_attr_changed_nobufs);
++      fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
+       _leave(" = %d", -ENOBUFS);
+       return -ENOBUFS;
+ }
+@@ -293,7 +293,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
+       /* allocate a retrieval operation and attempt to submit it */
+       op = kzalloc(sizeof(*op), GFP_NOIO);
+       if (!op) {
+-              fscache_stat(&fscache_n_retrievals_nomem);
++              fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+               return NULL;
+       }
+@@ -332,12 +332,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+               return 0;
+       }
+-      fscache_stat(&fscache_n_retrievals_wait);
++      fscache_stat_unchecked(&fscache_n_retrievals_wait);
+       jif = jiffies;
+       if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+                       TASK_INTERRUPTIBLE) != 0) {
+-              fscache_stat(&fscache_n_retrievals_intr);
++              fscache_stat_unchecked(&fscache_n_retrievals_intr);
+               _leave(" = -ERESTARTSYS");
+               return -ERESTARTSYS;
+       }
+@@ -355,8 +355,8 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+  */
+ int fscache_wait_for_operation_activation(struct fscache_object *object,
+                                         struct fscache_operation *op,
+-                                        atomic_t *stat_op_waits,
+-                                        atomic_t *stat_object_dead)
++                                        atomic_unchecked_t *stat_op_waits,
++                                        atomic_unchecked_t *stat_object_dead)
+ {
+       int ret;
+@@ -365,7 +365,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
+       _debug(">>> WT");
+       if (stat_op_waits)
+-              fscache_stat(stat_op_waits);
++              fscache_stat_unchecked(stat_op_waits);
+       if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
+                       TASK_INTERRUPTIBLE) != 0) {
+               ret = fscache_cancel_op(op, false);
+@@ -382,7 +382,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
+ check_if_dead:
+       if (op->state == FSCACHE_OP_ST_CANCELLED) {
+               if (stat_object_dead)
+-                      fscache_stat(stat_object_dead);
++                      fscache_stat_unchecked(stat_object_dead);
+               _leave(" = -ENOBUFS [cancelled]");
+               return -ENOBUFS;
+       }
+@@ -391,7 +391,7 @@ check_if_dead:
+               enum fscache_operation_state state = op->state;
+               fscache_cancel_op(op, true);
+               if (stat_object_dead)
+-                      fscache_stat(stat_object_dead);
++                      fscache_stat_unchecked(stat_object_dead);
+               _leave(" = -ENOBUFS [obj dead %d]", state);
+               return -ENOBUFS;
+       }
+@@ -420,7 +420,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+       _enter("%p,%p,,,", cookie, page);
+-      fscache_stat(&fscache_n_retrievals);
++      fscache_stat_unchecked(&fscache_n_retrievals);
+       if (hlist_empty(&cookie->backing_objects))
+               goto nobufs;
+@@ -462,7 +462,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+               goto nobufs_unlock_dec;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_retrieval_ops);
++      fscache_stat_unchecked(&fscache_n_retrieval_ops);
+       /* we wait for the operation to become active, and then process it
+        * *here*, in this thread, and not in the thread pool */
+@@ -488,15 +488,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ error:
+       if (ret == -ENOMEM)
+-              fscache_stat(&fscache_n_retrievals_nomem);
++              fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+       else if (ret == -ERESTARTSYS)
+-              fscache_stat(&fscache_n_retrievals_intr);
++              fscache_stat_unchecked(&fscache_n_retrievals_intr);
+       else if (ret == -ENODATA)
+-              fscache_stat(&fscache_n_retrievals_nodata);
++              fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+       else if (ret < 0)
+-              fscache_stat(&fscache_n_retrievals_nobufs);
++              fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       else
+-              fscache_stat(&fscache_n_retrievals_ok);
++              fscache_stat_unchecked(&fscache_n_retrievals_ok);
+       fscache_put_retrieval(op);
+       _leave(" = %d", ret);
+@@ -511,7 +511,7 @@ nobufs_unlock:
+               __fscache_wake_unused_cookie(cookie);
+       fscache_put_retrieval(op);
+ nobufs:
+-      fscache_stat(&fscache_n_retrievals_nobufs);
++      fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ }
+@@ -550,7 +550,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+       _enter("%p,,%d,,,", cookie, *nr_pages);
+-      fscache_stat(&fscache_n_retrievals);
++      fscache_stat_unchecked(&fscache_n_retrievals);
+       if (hlist_empty(&cookie->backing_objects))
+               goto nobufs;
+@@ -588,7 +588,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+               goto nobufs_unlock_dec;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_retrieval_ops);
++      fscache_stat_unchecked(&fscache_n_retrieval_ops);
+       /* we wait for the operation to become active, and then process it
+        * *here*, in this thread, and not in the thread pool */
+@@ -614,15 +614,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ error:
+       if (ret == -ENOMEM)
+-              fscache_stat(&fscache_n_retrievals_nomem);
++              fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+       else if (ret == -ERESTARTSYS)
+-              fscache_stat(&fscache_n_retrievals_intr);
++              fscache_stat_unchecked(&fscache_n_retrievals_intr);
+       else if (ret == -ENODATA)
+-              fscache_stat(&fscache_n_retrievals_nodata);
++              fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+       else if (ret < 0)
+-              fscache_stat(&fscache_n_retrievals_nobufs);
++              fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       else
+-              fscache_stat(&fscache_n_retrievals_ok);
++              fscache_stat_unchecked(&fscache_n_retrievals_ok);
+       fscache_put_retrieval(op);
+       _leave(" = %d", ret);
+@@ -637,7 +637,7 @@ nobufs_unlock:
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
+ nobufs:
+-      fscache_stat(&fscache_n_retrievals_nobufs);
++      fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ }
+@@ -662,7 +662,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+       _enter("%p,%p,,,", cookie, page);
+-      fscache_stat(&fscache_n_allocs);
++      fscache_stat_unchecked(&fscache_n_allocs);
+       if (hlist_empty(&cookie->backing_objects))
+               goto nobufs;
+@@ -696,7 +696,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+               goto nobufs_unlock_dec;
+       spin_unlock(&cookie->lock);
+-      fscache_stat(&fscache_n_alloc_ops);
++      fscache_stat_unchecked(&fscache_n_alloc_ops);
+       ret = fscache_wait_for_operation_activation(
+               object, &op->op,
+@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+ error:
+       if (ret == -ERESTARTSYS)
+-              fscache_stat(&fscache_n_allocs_intr);
++              fscache_stat_unchecked(&fscache_n_allocs_intr);
+       else if (ret < 0)
+-              fscache_stat(&fscache_n_allocs_nobufs);
++              fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+       else
+-              fscache_stat(&fscache_n_allocs_ok);
++              fscache_stat_unchecked(&fscache_n_allocs_ok);
+       fscache_put_retrieval(op);
+       _leave(" = %d", ret);
+@@ -730,7 +730,7 @@ nobufs_unlock:
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
+ nobufs:
+-      fscache_stat(&fscache_n_allocs_nobufs);
++      fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ }
+@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+       spin_lock(&cookie->stores_lock);
+-      fscache_stat(&fscache_n_store_calls);
++      fscache_stat_unchecked(&fscache_n_store_calls);
+       /* find a page to store */
+       page = NULL;
+@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+       page = results[0];
+       _debug("gang %d [%lx]", n, page->index);
+       if (page->index >= op->store_limit) {
+-              fscache_stat(&fscache_n_store_pages_over_limit);
++              fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
+               goto superseded;
+       }
+@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+       spin_unlock(&cookie->stores_lock);
+       spin_unlock(&object->lock);
+-      fscache_stat(&fscache_n_store_pages);
++      fscache_stat_unchecked(&fscache_n_store_pages);
+       fscache_stat(&fscache_n_cop_write_page);
+       ret = object->cache->ops->write_page(op, page);
+       fscache_stat_d(&fscache_n_cop_write_page);
+@@ -935,7 +935,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+       ASSERT(PageFsCache(page));
+-      fscache_stat(&fscache_n_stores);
++      fscache_stat_unchecked(&fscache_n_stores);
+       if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
+               _leave(" = -ENOBUFS [invalidating]");
+@@ -994,7 +994,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       spin_unlock(&cookie->stores_lock);
+       spin_unlock(&object->lock);
+-      op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
++      op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+       op->store_limit = object->store_limit;
+       __fscache_use_cookie(cookie);
+@@ -1003,8 +1003,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       spin_unlock(&cookie->lock);
+       radix_tree_preload_end();
+-      fscache_stat(&fscache_n_store_ops);
+-      fscache_stat(&fscache_n_stores_ok);
++      fscache_stat_unchecked(&fscache_n_store_ops);
++      fscache_stat_unchecked(&fscache_n_stores_ok);
+       /* the work queue now carries its own ref on the object */
+       fscache_put_operation(&op->op);
+@@ -1012,14 +1012,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+       return 0;
+ already_queued:
+-      fscache_stat(&fscache_n_stores_again);
++      fscache_stat_unchecked(&fscache_n_stores_again);
+ already_pending:
+       spin_unlock(&cookie->stores_lock);
+       spin_unlock(&object->lock);
+       spin_unlock(&cookie->lock);
+       radix_tree_preload_end();
+       fscache_put_operation(&op->op);
+-      fscache_stat(&fscache_n_stores_ok);
++      fscache_stat_unchecked(&fscache_n_stores_ok);
+       _leave(" = 0");
+       return 0;
+@@ -1041,14 +1041,14 @@ nobufs:
+       fscache_put_operation(&op->op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
+-      fscache_stat(&fscache_n_stores_nobufs);
++      fscache_stat_unchecked(&fscache_n_stores_nobufs);
+       _leave(" = -ENOBUFS");
+       return -ENOBUFS;
+ nomem_free:
+       fscache_put_operation(&op->op);
+ nomem:
+-      fscache_stat(&fscache_n_stores_oom);
++      fscache_stat_unchecked(&fscache_n_stores_oom);
+       _leave(" = -ENOMEM");
+       return -ENOMEM;
+ }
+@@ -1066,7 +1066,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
+       ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+       ASSERTCMP(page, !=, NULL);
+-      fscache_stat(&fscache_n_uncaches);
++      fscache_stat_unchecked(&fscache_n_uncaches);
+       /* cache withdrawal may beat us to it */
+       if (!PageFsCache(page))
+@@ -1117,7 +1117,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
+       struct fscache_cookie *cookie = op->op.object->cookie;
+ #ifdef CONFIG_FSCACHE_STATS
+-      atomic_inc(&fscache_n_marks);
++      atomic_inc_unchecked(&fscache_n_marks);
+ #endif
+       _debug("- mark %p{%lx}", page, page->index);
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 7ac6e83..352976a 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -18,100 +18,100 @@
+ /*
+  * operation counters
+  */
+-atomic_t fscache_n_op_pend;
+-atomic_t fscache_n_op_run;
+-atomic_t fscache_n_op_enqueue;
+-atomic_t fscache_n_op_requeue;
+-atomic_t fscache_n_op_deferred_release;
+-atomic_t fscache_n_op_initialised;
+-atomic_t fscache_n_op_release;
+-atomic_t fscache_n_op_gc;
+-atomic_t fscache_n_op_cancelled;
+-atomic_t fscache_n_op_rejected;
++atomic_unchecked_t fscache_n_op_pend;
++atomic_unchecked_t fscache_n_op_run;
++atomic_unchecked_t fscache_n_op_enqueue;
++atomic_unchecked_t fscache_n_op_requeue;
++atomic_unchecked_t fscache_n_op_deferred_release;
++atomic_unchecked_t fscache_n_op_initialised;
++atomic_unchecked_t fscache_n_op_release;
++atomic_unchecked_t fscache_n_op_gc;
++atomic_unchecked_t fscache_n_op_cancelled;
++atomic_unchecked_t fscache_n_op_rejected;
+-atomic_t fscache_n_attr_changed;
+-atomic_t fscache_n_attr_changed_ok;
+-atomic_t fscache_n_attr_changed_nobufs;
+-atomic_t fscache_n_attr_changed_nomem;
+-atomic_t fscache_n_attr_changed_calls;
++atomic_unchecked_t fscache_n_attr_changed;
++atomic_unchecked_t fscache_n_attr_changed_ok;
++atomic_unchecked_t fscache_n_attr_changed_nobufs;
++atomic_unchecked_t fscache_n_attr_changed_nomem;
++atomic_unchecked_t fscache_n_attr_changed_calls;
+-atomic_t fscache_n_allocs;
+-atomic_t fscache_n_allocs_ok;
+-atomic_t fscache_n_allocs_wait;
+-atomic_t fscache_n_allocs_nobufs;
+-atomic_t fscache_n_allocs_intr;
+-atomic_t fscache_n_allocs_object_dead;
+-atomic_t fscache_n_alloc_ops;
+-atomic_t fscache_n_alloc_op_waits;
++atomic_unchecked_t fscache_n_allocs;
++atomic_unchecked_t fscache_n_allocs_ok;
++atomic_unchecked_t fscache_n_allocs_wait;
++atomic_unchecked_t fscache_n_allocs_nobufs;
++atomic_unchecked_t fscache_n_allocs_intr;
++atomic_unchecked_t fscache_n_allocs_object_dead;
++atomic_unchecked_t fscache_n_alloc_ops;
++atomic_unchecked_t fscache_n_alloc_op_waits;
+-atomic_t fscache_n_retrievals;
+-atomic_t fscache_n_retrievals_ok;
+-atomic_t fscache_n_retrievals_wait;
+-atomic_t fscache_n_retrievals_nodata;
+-atomic_t fscache_n_retrievals_nobufs;
+-atomic_t fscache_n_retrievals_intr;
+-atomic_t fscache_n_retrievals_nomem;
+-atomic_t fscache_n_retrievals_object_dead;
+-atomic_t fscache_n_retrieval_ops;
+-atomic_t fscache_n_retrieval_op_waits;
++atomic_unchecked_t fscache_n_retrievals;
++atomic_unchecked_t fscache_n_retrievals_ok;
++atomic_unchecked_t fscache_n_retrievals_wait;
++atomic_unchecked_t fscache_n_retrievals_nodata;
++atomic_unchecked_t fscache_n_retrievals_nobufs;
++atomic_unchecked_t fscache_n_retrievals_intr;
++atomic_unchecked_t fscache_n_retrievals_nomem;
++atomic_unchecked_t fscache_n_retrievals_object_dead;
++atomic_unchecked_t fscache_n_retrieval_ops;
++atomic_unchecked_t fscache_n_retrieval_op_waits;
+-atomic_t fscache_n_stores;
+-atomic_t fscache_n_stores_ok;
+-atomic_t fscache_n_stores_again;
+-atomic_t fscache_n_stores_nobufs;
+-atomic_t fscache_n_stores_oom;
+-atomic_t fscache_n_store_ops;
+-atomic_t fscache_n_store_calls;
+-atomic_t fscache_n_store_pages;
+-atomic_t fscache_n_store_radix_deletes;
+-atomic_t fscache_n_store_pages_over_limit;
++atomic_unchecked_t fscache_n_stores;
++atomic_unchecked_t fscache_n_stores_ok;
++atomic_unchecked_t fscache_n_stores_again;
++atomic_unchecked_t fscache_n_stores_nobufs;
++atomic_unchecked_t fscache_n_stores_oom;
++atomic_unchecked_t fscache_n_store_ops;
++atomic_unchecked_t fscache_n_store_calls;
++atomic_unchecked_t fscache_n_store_pages;
++atomic_unchecked_t fscache_n_store_radix_deletes;
++atomic_unchecked_t fscache_n_store_pages_over_limit;
+-atomic_t fscache_n_store_vmscan_not_storing;
+-atomic_t fscache_n_store_vmscan_gone;
+-atomic_t fscache_n_store_vmscan_busy;
+-atomic_t fscache_n_store_vmscan_cancelled;
+-atomic_t fscache_n_store_vmscan_wait;
++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++atomic_unchecked_t fscache_n_store_vmscan_gone;
++atomic_unchecked_t fscache_n_store_vmscan_busy;
++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++atomic_unchecked_t fscache_n_store_vmscan_wait;
+-atomic_t fscache_n_marks;
+-atomic_t fscache_n_uncaches;
++atomic_unchecked_t fscache_n_marks;
++atomic_unchecked_t fscache_n_uncaches;
+-atomic_t fscache_n_acquires;
+-atomic_t fscache_n_acquires_null;
+-atomic_t fscache_n_acquires_no_cache;
+-atomic_t fscache_n_acquires_ok;
+-atomic_t fscache_n_acquires_nobufs;
+-atomic_t fscache_n_acquires_oom;
++atomic_unchecked_t fscache_n_acquires;
++atomic_unchecked_t fscache_n_acquires_null;
++atomic_unchecked_t fscache_n_acquires_no_cache;
++atomic_unchecked_t fscache_n_acquires_ok;
++atomic_unchecked_t fscache_n_acquires_nobufs;
++atomic_unchecked_t fscache_n_acquires_oom;
+-atomic_t fscache_n_invalidates;
+-atomic_t fscache_n_invalidates_run;
++atomic_unchecked_t fscache_n_invalidates;
++atomic_unchecked_t fscache_n_invalidates_run;
+-atomic_t fscache_n_updates;
+-atomic_t fscache_n_updates_null;
+-atomic_t fscache_n_updates_run;
++atomic_unchecked_t fscache_n_updates;
++atomic_unchecked_t fscache_n_updates_null;
++atomic_unchecked_t fscache_n_updates_run;
+-atomic_t fscache_n_relinquishes;
+-atomic_t fscache_n_relinquishes_null;
+-atomic_t fscache_n_relinquishes_waitcrt;
+-atomic_t fscache_n_relinquishes_retire;
++atomic_unchecked_t fscache_n_relinquishes;
++atomic_unchecked_t fscache_n_relinquishes_null;
++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++atomic_unchecked_t fscache_n_relinquishes_retire;
+-atomic_t fscache_n_cookie_index;
+-atomic_t fscache_n_cookie_data;
+-atomic_t fscache_n_cookie_special;
++atomic_unchecked_t fscache_n_cookie_index;
++atomic_unchecked_t fscache_n_cookie_data;
++atomic_unchecked_t fscache_n_cookie_special;
+-atomic_t fscache_n_object_alloc;
+-atomic_t fscache_n_object_no_alloc;
+-atomic_t fscache_n_object_lookups;
+-atomic_t fscache_n_object_lookups_negative;
+-atomic_t fscache_n_object_lookups_positive;
+-atomic_t fscache_n_object_lookups_timed_out;
+-atomic_t fscache_n_object_created;
+-atomic_t fscache_n_object_avail;
+-atomic_t fscache_n_object_dead;
++atomic_unchecked_t fscache_n_object_alloc;
++atomic_unchecked_t fscache_n_object_no_alloc;
++atomic_unchecked_t fscache_n_object_lookups;
++atomic_unchecked_t fscache_n_object_lookups_negative;
++atomic_unchecked_t fscache_n_object_lookups_positive;
++atomic_unchecked_t fscache_n_object_lookups_timed_out;
++atomic_unchecked_t fscache_n_object_created;
++atomic_unchecked_t fscache_n_object_avail;
++atomic_unchecked_t fscache_n_object_dead;
+-atomic_t fscache_n_checkaux_none;
+-atomic_t fscache_n_checkaux_okay;
+-atomic_t fscache_n_checkaux_update;
+-atomic_t fscache_n_checkaux_obsolete;
++atomic_unchecked_t fscache_n_checkaux_none;
++atomic_unchecked_t fscache_n_checkaux_okay;
++atomic_unchecked_t fscache_n_checkaux_update;
++atomic_unchecked_t fscache_n_checkaux_obsolete;
+ atomic_t fscache_n_cop_alloc_object;
+ atomic_t fscache_n_cop_lookup_object;
+@@ -144,119 +144,119 @@ static int fscache_stats_show(struct seq_file *m, void *v)
+       seq_puts(m, "FS-Cache statistics\n");
+       seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+-                 atomic_read(&fscache_n_cookie_index),
+-                 atomic_read(&fscache_n_cookie_data),
+-                 atomic_read(&fscache_n_cookie_special));
++                 atomic_read_unchecked(&fscache_n_cookie_index),
++                 atomic_read_unchecked(&fscache_n_cookie_data),
++                 atomic_read_unchecked(&fscache_n_cookie_special));
+       seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+-                 atomic_read(&fscache_n_object_alloc),
+-                 atomic_read(&fscache_n_object_no_alloc),
+-                 atomic_read(&fscache_n_object_avail),
+-                 atomic_read(&fscache_n_object_dead));
++                 atomic_read_unchecked(&fscache_n_object_alloc),
++                 atomic_read_unchecked(&fscache_n_object_no_alloc),
++                 atomic_read_unchecked(&fscache_n_object_avail),
++                 atomic_read_unchecked(&fscache_n_object_dead));
+       seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+-                 atomic_read(&fscache_n_checkaux_none),
+-                 atomic_read(&fscache_n_checkaux_okay),
+-                 atomic_read(&fscache_n_checkaux_update),
+-                 atomic_read(&fscache_n_checkaux_obsolete));
++                 atomic_read_unchecked(&fscache_n_checkaux_none),
++                 atomic_read_unchecked(&fscache_n_checkaux_okay),
++                 atomic_read_unchecked(&fscache_n_checkaux_update),
++                 atomic_read_unchecked(&fscache_n_checkaux_obsolete));
+       seq_printf(m, "Pages  : mrk=%u unc=%u\n",
+-                 atomic_read(&fscache_n_marks),
+-                 atomic_read(&fscache_n_uncaches));
++                 atomic_read_unchecked(&fscache_n_marks),
++                 atomic_read_unchecked(&fscache_n_uncaches));
+       seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+                  " oom=%u\n",
+-                 atomic_read(&fscache_n_acquires),
+-                 atomic_read(&fscache_n_acquires_null),
+-                 atomic_read(&fscache_n_acquires_no_cache),
+-                 atomic_read(&fscache_n_acquires_ok),
+-                 atomic_read(&fscache_n_acquires_nobufs),
+-                 atomic_read(&fscache_n_acquires_oom));
++                 atomic_read_unchecked(&fscache_n_acquires),
++                 atomic_read_unchecked(&fscache_n_acquires_null),
++                 atomic_read_unchecked(&fscache_n_acquires_no_cache),
++                 atomic_read_unchecked(&fscache_n_acquires_ok),
++                 atomic_read_unchecked(&fscache_n_acquires_nobufs),
++                 atomic_read_unchecked(&fscache_n_acquires_oom));
+       seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
+-                 atomic_read(&fscache_n_object_lookups),
+-                 atomic_read(&fscache_n_object_lookups_negative),
+-                 atomic_read(&fscache_n_object_lookups_positive),
+-                 atomic_read(&fscache_n_object_created),
+-                 atomic_read(&fscache_n_object_lookups_timed_out));
++                 atomic_read_unchecked(&fscache_n_object_lookups),
++                 atomic_read_unchecked(&fscache_n_object_lookups_negative),
++                 atomic_read_unchecked(&fscache_n_object_lookups_positive),
++                 atomic_read_unchecked(&fscache_n_object_created),
++                 atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
+       seq_printf(m, "Invals : n=%u run=%u\n",
+-                 atomic_read(&fscache_n_invalidates),
+-                 atomic_read(&fscache_n_invalidates_run));
++                 atomic_read_unchecked(&fscache_n_invalidates),
++                 atomic_read_unchecked(&fscache_n_invalidates_run));
+       seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+-                 atomic_read(&fscache_n_updates),
+-                 atomic_read(&fscache_n_updates_null),
+-                 atomic_read(&fscache_n_updates_run));
++                 atomic_read_unchecked(&fscache_n_updates),
++                 atomic_read_unchecked(&fscache_n_updates_null),
++                 atomic_read_unchecked(&fscache_n_updates_run));
+       seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+-                 atomic_read(&fscache_n_relinquishes),
+-                 atomic_read(&fscache_n_relinquishes_null),
+-                 atomic_read(&fscache_n_relinquishes_waitcrt),
+-                 atomic_read(&fscache_n_relinquishes_retire));
++                 atomic_read_unchecked(&fscache_n_relinquishes),
++                 atomic_read_unchecked(&fscache_n_relinquishes_null),
++                 atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
++                 atomic_read_unchecked(&fscache_n_relinquishes_retire));
+       seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+-                 atomic_read(&fscache_n_attr_changed),
+-                 atomic_read(&fscache_n_attr_changed_ok),
+-                 atomic_read(&fscache_n_attr_changed_nobufs),
+-                 atomic_read(&fscache_n_attr_changed_nomem),
+-                 atomic_read(&fscache_n_attr_changed_calls));
++                 atomic_read_unchecked(&fscache_n_attr_changed),
++                 atomic_read_unchecked(&fscache_n_attr_changed_ok),
++                 atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
++                 atomic_read_unchecked(&fscache_n_attr_changed_nomem),
++                 atomic_read_unchecked(&fscache_n_attr_changed_calls));
+       seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
+-                 atomic_read(&fscache_n_allocs),
+-                 atomic_read(&fscache_n_allocs_ok),
+-                 atomic_read(&fscache_n_allocs_wait),
+-                 atomic_read(&fscache_n_allocs_nobufs),
+-                 atomic_read(&fscache_n_allocs_intr));
++                 atomic_read_unchecked(&fscache_n_allocs),
++                 atomic_read_unchecked(&fscache_n_allocs_ok),
++                 atomic_read_unchecked(&fscache_n_allocs_wait),
++                 atomic_read_unchecked(&fscache_n_allocs_nobufs),
++                 atomic_read_unchecked(&fscache_n_allocs_intr));
+       seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
+-                 atomic_read(&fscache_n_alloc_ops),
+-                 atomic_read(&fscache_n_alloc_op_waits),
+-                 atomic_read(&fscache_n_allocs_object_dead));
++                 atomic_read_unchecked(&fscache_n_alloc_ops),
++                 atomic_read_unchecked(&fscache_n_alloc_op_waits),
++                 atomic_read_unchecked(&fscache_n_allocs_object_dead));
+       seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+                  " int=%u oom=%u\n",
+-                 atomic_read(&fscache_n_retrievals),
+-                 atomic_read(&fscache_n_retrievals_ok),
+-                 atomic_read(&fscache_n_retrievals_wait),
+-                 atomic_read(&fscache_n_retrievals_nodata),
+-                 atomic_read(&fscache_n_retrievals_nobufs),
+-                 atomic_read(&fscache_n_retrievals_intr),
+-                 atomic_read(&fscache_n_retrievals_nomem));
++                 atomic_read_unchecked(&fscache_n_retrievals),
++                 atomic_read_unchecked(&fscache_n_retrievals_ok),
++                 atomic_read_unchecked(&fscache_n_retrievals_wait),
++                 atomic_read_unchecked(&fscache_n_retrievals_nodata),
++                 atomic_read_unchecked(&fscache_n_retrievals_nobufs),
++                 atomic_read_unchecked(&fscache_n_retrievals_intr),
++                 atomic_read_unchecked(&fscache_n_retrievals_nomem));
+       seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
+-                 atomic_read(&fscache_n_retrieval_ops),
+-                 atomic_read(&fscache_n_retrieval_op_waits),
+-                 atomic_read(&fscache_n_retrievals_object_dead));
++                 atomic_read_unchecked(&fscache_n_retrieval_ops),
++                 atomic_read_unchecked(&fscache_n_retrieval_op_waits),
++                 atomic_read_unchecked(&fscache_n_retrievals_object_dead));
+       seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+-                 atomic_read(&fscache_n_stores),
+-                 atomic_read(&fscache_n_stores_ok),
+-                 atomic_read(&fscache_n_stores_again),
+-                 atomic_read(&fscache_n_stores_nobufs),
+-                 atomic_read(&fscache_n_stores_oom));
++                 atomic_read_unchecked(&fscache_n_stores),
++                 atomic_read_unchecked(&fscache_n_stores_ok),
++                 atomic_read_unchecked(&fscache_n_stores_again),
++                 atomic_read_unchecked(&fscache_n_stores_nobufs),
++                 atomic_read_unchecked(&fscache_n_stores_oom));
+       seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+-                 atomic_read(&fscache_n_store_ops),
+-                 atomic_read(&fscache_n_store_calls),
+-                 atomic_read(&fscache_n_store_pages),
+-                 atomic_read(&fscache_n_store_radix_deletes),
+-                 atomic_read(&fscache_n_store_pages_over_limit));
++                 atomic_read_unchecked(&fscache_n_store_ops),
++                 atomic_read_unchecked(&fscache_n_store_calls),
++                 atomic_read_unchecked(&fscache_n_store_pages),
++                 atomic_read_unchecked(&fscache_n_store_radix_deletes),
++                 atomic_read_unchecked(&fscache_n_store_pages_over_limit));
+       seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
+-                 atomic_read(&fscache_n_store_vmscan_not_storing),
+-                 atomic_read(&fscache_n_store_vmscan_gone),
+-                 atomic_read(&fscache_n_store_vmscan_busy),
+-                 atomic_read(&fscache_n_store_vmscan_cancelled),
+-                 atomic_read(&fscache_n_store_vmscan_wait));
++                 atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_gone),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_busy),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
++                 atomic_read_unchecked(&fscache_n_store_vmscan_wait));
+       seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
+-                 atomic_read(&fscache_n_op_pend),
+-                 atomic_read(&fscache_n_op_run),
+-                 atomic_read(&fscache_n_op_enqueue),
+-                 atomic_read(&fscache_n_op_cancelled),
+-                 atomic_read(&fscache_n_op_rejected));
++                 atomic_read_unchecked(&fscache_n_op_pend),
++                 atomic_read_unchecked(&fscache_n_op_run),
++                 atomic_read_unchecked(&fscache_n_op_enqueue),
++                 atomic_read_unchecked(&fscache_n_op_cancelled),
++                 atomic_read_unchecked(&fscache_n_op_rejected));
+       seq_printf(m, "Ops    : ini=%u dfr=%u rel=%u gc=%u\n",
+-                 atomic_read(&fscache_n_op_initialised),
+-                 atomic_read(&fscache_n_op_deferred_release),
+-                 atomic_read(&fscache_n_op_release),
+-                 atomic_read(&fscache_n_op_gc));
++                 atomic_read_unchecked(&fscache_n_op_initialised),
++                 atomic_read_unchecked(&fscache_n_op_deferred_release),
++                 atomic_read_unchecked(&fscache_n_op_release),
++                 atomic_read_unchecked(&fscache_n_op_gc));
+       seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+                  atomic_read(&fscache_n_cop_alloc_object),
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index c5b6b71..527e347 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -611,10 +611,12 @@ static int __init cuse_init(void)
+               INIT_LIST_HEAD(&cuse_conntbl[i]);
+       /* inherit and extend fuse_dev_operations */
+-      cuse_channel_fops               = fuse_dev_operations;
+-      cuse_channel_fops.owner         = THIS_MODULE;
+-      cuse_channel_fops.open          = cuse_channel_open;
+-      cuse_channel_fops.release       = cuse_channel_release;
++      pax_open_kernel();
++      memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
++      const_cast(cuse_channel_fops.owner)     = THIS_MODULE;
++      const_cast(cuse_channel_fops.open)      = cuse_channel_open;
++      const_cast(cuse_channel_fops.release)   = cuse_channel_release;
++      pax_close_kernel();
+       cuse_class = class_create(THIS_MODULE, "cuse");
+       if (IS_ERR(cuse_class))
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index a94d2ed..80c8060 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1366,7 +1366,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+       ret = 0;
+       pipe_lock(pipe);
+-      if (!pipe->readers) {
++      if (!atomic_read(&pipe->readers)) {
+               send_sig(SIGPIPE, current, 0);
+               if (!ret)
+                       ret = -EPIPE;
+@@ -1395,7 +1395,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+               page_nr++;
+               ret += buf->len;
+-              if (pipe->files)
++              if (atomic_read(&pipe->files))
+                       do_wakeup = 1;
+       }
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 3988b43..c02080c 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -838,9 +838,9 @@ struct fuse_fill_data {
+       unsigned nr_pages;
+ };
+-static int fuse_readpages_fill(void *_data, struct page *page)
++static int fuse_readpages_fill(struct file *_data, struct page *page)
+ {
+-      struct fuse_fill_data *data = _data;
++      struct fuse_fill_data *data = (struct fuse_fill_data *)_data;
+       struct fuse_req *req = data->req;
+       struct inode *inode = data->inode;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 4e05b51..36c4e1f 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -29,7 +29,7 @@ static struct kmem_cache *fuse_inode_cachep;
+ struct list_head fuse_conn_list;
+ DEFINE_MUTEX(fuse_mutex);
+-static int set_global_limit(const char *val, struct kernel_param *kp);
++static int set_global_limit(const char *val, const struct kernel_param *kp);
+ unsigned max_user_bgreq;
+ module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
+@@ -824,7 +824,7 @@ static void sanitize_global_limit(unsigned *limit)
+               *limit = (1 << 16) - 1;
+ }
+-static int set_global_limit(const char *val, struct kernel_param *kp)
++static int set_global_limit(const char *val, const struct kernel_param *kp)
+ {
+       int rv;
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index 82df368..0079887 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -511,7 +511,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+  *
+  */
+-static int __gfs2_readpage(void *file, struct page *page)
++static int __gfs2_readpage(struct file *file, struct page *page)
+ {
+       struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+       struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 320e65e..7eb400d 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -776,7 +776,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
+ {
+       loff_t max = *len;
+       const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+-      unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
++      unsigned int tmp, max_data = max_blocks - 3 * sdp->sd_max_height + 3;
+       for (tmp = max_data; tmp > sdp->sd_diptrs;) {
+               tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 3a90b2b..7335643 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -324,9 +324,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
+       if (held1 != held2) {
+               GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+               if (held2)
+-                      gl->gl_lockref.count++;
++                      __lockref_inc(&gl->gl_lockref);
+               else
+-                      gl->gl_lockref.count--;
++                      __lockref_dec(&gl->gl_lockref);
+       }
+       if (held1 && held2 && list_empty(&gl->gl_holders))
+               clear_bit(GLF_QUEUED, &gl->gl_flags);
+@@ -560,9 +560,9 @@ out:
+ out_sched:
+       clear_bit(GLF_LOCK, &gl->gl_flags);
+       smp_mb__after_atomic();
+-      gl->gl_lockref.count++;
++      __lockref_inc(&gl->gl_lockref);
+       if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+-              gl->gl_lockref.count--;
++              __lockref_dec(&gl->gl_lockref);
+       return;
+ out_unlock:
+@@ -690,7 +690,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+       gl->gl_node.next = NULL;
+       gl->gl_flags = 0;
+       gl->gl_name = name;
+-      gl->gl_lockref.count = 1;
++      __lockref_set(&gl->gl_lockref, 1);
+       gl->gl_state = LM_ST_UNLOCKED;
+       gl->gl_target = LM_ST_UNLOCKED;
+       gl->gl_demote_state = LM_ST_EXCLUSIVE;
+@@ -979,9 +979,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
+       if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
+                    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
+               set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+-              gl->gl_lockref.count++;
++              __lockref_inc(&gl->gl_lockref);
+               if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+-                      gl->gl_lockref.count--;
++                      __lockref_dec(&gl->gl_lockref);
+       }
+       run_queue(gl, 1);
+       spin_unlock(&gl->gl_lockref.lock);
+@@ -1286,7 +1286,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
+               }
+       }
+-      gl->gl_lockref.count++;
++      __lockref_inc(&gl->gl_lockref);
+       set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+       spin_unlock(&gl->gl_lockref.lock);
+@@ -1345,12 +1345,12 @@ add_back_to_lru:
+                       goto add_back_to_lru;
+               }
+               clear_bit(GLF_LRU, &gl->gl_flags);
+-              gl->gl_lockref.count++;
++              __lockref_inc(&gl->gl_lockref);
+               if (demote_ok(gl))
+                       handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+               WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
+               if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+-                      gl->gl_lockref.count--;
++                      __lockref_dec(&gl->gl_lockref);
+               spin_unlock(&gl->gl_lockref.lock);
+               cond_resched_lock(&lru_lock);
+       }
+@@ -1677,7 +1677,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
+                 state2str(gl->gl_demote_state), dtime,
+                 atomic_read(&gl->gl_ail_count),
+                 atomic_read(&gl->gl_revokes),
+-                (int)gl->gl_lockref.count, gl->gl_hold_time);
++                __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
+       list_for_each_entry(gh, &gl->gl_holders, gh_list)
+               dump_holder(seq, gh);
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 5db59d4..817f4eb 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -549,9 +549,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
+       if (gl->gl_demote_state == LM_ST_UNLOCKED &&
+           gl->gl_state == LM_ST_SHARED && ip) {
+-              gl->gl_lockref.count++;
++              __lockref_inc(&gl->gl_lockref);
+               if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+-                      gl->gl_lockref.count--;
++                      __lockref_dec(&gl->gl_lockref);
+       }
+ }
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 77930ca..684c04d 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
+       if (!spin_trylock(&qd->qd_lockref.lock))
+               return LRU_SKIP;
+-      if (qd->qd_lockref.count == 0) {
++      if (__lockref_read(&qd->qd_lockref) == 0) {
+               lockref_mark_dead(&qd->qd_lockref);
+               list_lru_isolate_move(lru, &qd->qd_lru, dispose);
+       }
+@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
+               return NULL;
+       qd->qd_sbd = sdp;
+-      qd->qd_lockref.count = 1;
++      __lockref_set(&qd->qd_lockref, 1);
+       spin_lock_init(&qd->qd_lockref.lock);
+       qd->qd_id = qid;
+       qd->qd_slot = -1;
+@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
+       if (lockref_put_or_lock(&qd->qd_lockref))
+               return;
+-      qd->qd_lockref.count = 0;
++      __lockref_set(&qd->qd_lockref, 0);
+       list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
+       spin_unlock(&qd->qd_lockref.lock);
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 4ea71eb..19effa7 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -174,6 +174,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct hstate *h = hstate_file(file);
++      unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+       struct vm_unmapped_area_info info;
+       if (len & ~huge_page_mask(h))
+@@ -187,17 +188,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               return addr;
+       }
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = ALIGN(addr, huge_page_size(h));
+               vma = find_vma(mm, addr);
+-              if (TASK_SIZE - len >= addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++              if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              info.low_limit += mm->delta_mmap;
++#endif
++
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+       info.align_offset = 0;
+@@ -1212,7 +1222,7 @@ static struct file_system_type hugetlbfs_fs_type = {
+       .kill_sb        = kill_litter_super,
+ };
+-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
++struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
+ static int can_do_hugetlb_shm(void)
+ {
+diff --git a/fs/inode.c b/fs/inode.c
+index 7e3ef3a..4e28e95 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -853,19 +853,19 @@ unsigned int get_next_ino(void)
+       unsigned int *p = &get_cpu_var(last_ino);
+       unsigned int res = *p;
++start:
++
+ #ifdef CONFIG_SMP
+       if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+-              static atomic_t shared_last_ino;
+-              int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
++              static atomic_unchecked_t shared_last_ino;
++              int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
+               res = next - LAST_INO_BATCH;
+       }
+ #endif
+-      res++;
+-      /* get_next_ino should not provide a 0 inode number */
+-      if (unlikely(!res))
+-              res++;
++      if (unlikely(!++res))
++              goto start;     /* never zero */
+       *p = res;
+       put_cpu_var(last_ino);
+       return res;
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 5bb565f..41cbee9 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -1077,7 +1077,7 @@ restart_loop:
+        */
+       stats.ts_tid = commit_transaction->t_tid;
+       stats.run.rs_handle_count =
+-              atomic_read(&commit_transaction->t_handle_count);
++              atomic_read_unchecked(&commit_transaction->t_handle_count);
+       trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
+                            commit_transaction->t_tid, &stats.run);
+       stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index e165266..0799fc5 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -91,7 +91,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
+       atomic_set(&transaction->t_updates, 0);
+       atomic_set(&transaction->t_outstanding_credits,
+                  atomic_read(&journal->j_reserved_credits));
+-      atomic_set(&transaction->t_handle_count, 0);
++      atomic_set_unchecked(&transaction->t_handle_count, 0);
+       INIT_LIST_HEAD(&transaction->t_inode_list);
+       INIT_LIST_HEAD(&transaction->t_private_list);
+@@ -378,7 +378,7 @@ repeat:
+       handle->h_requested_credits = blocks;
+       handle->h_start_jiffies = jiffies;
+       atomic_inc(&transaction->t_updates);
+-      atomic_inc(&transaction->t_handle_count);
++      atomic_inc_unchecked(&transaction->t_handle_count);
+       jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
+                 handle, blocks,
+                 atomic_read(&transaction->t_outstanding_credits),
+diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
+index 4a6cf28..d3a29d3 100644
+--- a/fs/jffs2/erase.c
++++ b/fs/jffs2/erase.c
+@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
+               struct jffs2_unknown_node marker = {
+                       .magic =        cpu_to_je16(JFFS2_MAGIC_BITMASK),
+                       .nodetype =     cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+-                      .totlen =       cpu_to_je32(c->cleanmarker_size)
++                      .totlen =       cpu_to_je32(c->cleanmarker_size),
++                      .hdr_crc =      cpu_to_je32(0)
+               };
+               jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index 0e62dec..2beac6f 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -112,8 +112,9 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+       return ret;
+ }
+-int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
++int jffs2_do_readpage_unlock(struct file *_inode, struct page *pg)
+ {
++      struct inode *inode = (struct inode *)_inode;
+       int ret = jffs2_do_readpage_nolock(inode, pg);
+       unlock_page(pg);
+       return ret;
+@@ -126,7 +127,7 @@ static int jffs2_readpage (struct file *filp, struct page *pg)
+       int ret;
+       mutex_lock(&f->sem);
+-      ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
++      ret = jffs2_do_readpage_unlock((struct file *)pg->mapping->host, pg);
+       mutex_unlock(&f->sem);
+       return ret;
+ }
+diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
+index ae2ebb2..39becae 100644
+--- a/fs/jffs2/fs.c
++++ b/fs/jffs2/fs.c
+@@ -686,7 +686,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
+       struct page *pg;
+       pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
+-                           (void *)jffs2_do_readpage_unlock, inode);
++                           jffs2_do_readpage_unlock, inode);
+       if (IS_ERR(pg))
+               return (void *)pg;
+diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
+index 824e61e..2d686a6 100644
+--- a/fs/jffs2/os-linux.h
++++ b/fs/jffs2/os-linux.h
+@@ -154,7 +154,7 @@ extern const struct file_operations jffs2_file_operations;
+ extern const struct inode_operations jffs2_file_inode_operations;
+ extern const struct address_space_operations jffs2_file_address_operations;
+ int jffs2_fsync(struct file *, loff_t, loff_t, int);
+-int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
++int jffs2_do_readpage_unlock (struct file *_inode, struct page *pg);
+ /* ioctl.c */
+ long jffs2_ioctl(struct file *, unsigned int, unsigned long);
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index b25d28a..7934a69 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
+ {
+       .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+       .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+-      .totlen = constant_cpu_to_je32(8)
++      .totlen = constant_cpu_to_je32(8),
++      .hdr_crc = constant_cpu_to_je32(0)
+ };
+ /*
+diff --git a/fs/jfs/super.c b/fs/jfs/super.c
+index cec8814..daae32f 100644
+--- a/fs/jfs/super.c
++++ b/fs/jfs/super.c
+@@ -897,8 +897,10 @@ static int __init init_jfs_fs(void)
+       int rc;
+       jfs_inode_cachep =
+-          kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
++          kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info), 0,
+                           SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
++                          offsetof(struct jfs_inode_info, i_inline),
++                          sizeof(((struct jfs_inode_info *)0)->i_inline),
+                           init_once);
+       if (jfs_inode_cachep == NULL)
+               return -ENOMEM;
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index e57174d..573ed14 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -334,7 +334,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
+  *
+  *    Returns 31 bit hash of ns + name (so it fits in an off_t )
+  */
+-static unsigned int kernfs_name_hash(const char *name, const void *ns)
++static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
+ {
+       unsigned long hash = init_name_hash(ns);
+       unsigned int len = strlen(name);
+@@ -1074,6 +1074,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
+       ret = scops->mkdir(parent, dentry->d_name.name, mode);
+       kernfs_put_active(parent);
++
++      if (!ret) {
++              struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
++              ret = PTR_ERR_OR_ZERO(dentry_ret);
++      }
++
+       return ret;
+ }
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 2bcb86e..b9fad5d 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
+ struct kernfs_open_node {
+       atomic_t                refcnt;
+-      atomic_t                event;
++      atomic_unchecked_t      event;
+       wait_queue_head_t       poll;
+       struct list_head        files; /* goes through kernfs_open_file.list */
+ };
+@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
+ {
+       struct kernfs_open_file *of = sf->private;
+-      of->event = atomic_read(&of->kn->attr.open->event);
++      of->event = atomic_read_unchecked(&of->kn->attr.open->event);
+       return of->kn->attr.ops->seq_show(sf, v);
+ }
+@@ -208,7 +208,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
+               goto out_free;
+       }
+-      of->event = atomic_read(&of->kn->attr.open->event);
++      of->event = atomic_read_unchecked(&of->kn->attr.open->event);
+       ops = kernfs_ops(of->kn);
+       if (ops->read)
+               len = ops->read(of, buf, len, *ppos);
+@@ -275,7 +275,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+ {
+       struct kernfs_open_file *of = kernfs_of(file);
+       const struct kernfs_ops *ops;
+-      size_t len;
++      ssize_t len;
+       char *buf;
+       if (of->atomic_write_len) {
+@@ -391,12 +391,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
+       return ret;
+ }
+-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
+-                           void *buf, int len, int write)
++static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
++                           void *buf, size_t len, int write)
+ {
+       struct file *file = vma->vm_file;
+       struct kernfs_open_file *of = kernfs_of(file);
+-      int ret;
++      ssize_t ret;
+       if (!of->vm_ops)
+               return -EINVAL;
+@@ -575,7 +575,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
+               return -ENOMEM;
+       atomic_set(&new_on->refcnt, 0);
+-      atomic_set(&new_on->event, 1);
++      atomic_set_unchecked(&new_on->event, 1);
+       init_waitqueue_head(&new_on->poll);
+       INIT_LIST_HEAD(&new_on->files);
+       goto retry;
+@@ -799,7 +799,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
+       kernfs_put_active(kn);
+-      if (of->event != atomic_read(&on->event))
++      if (of->event != atomic_read_unchecked(&on->event))
+               goto trigger;
+       return DEFAULT_POLLMASK;
+@@ -830,7 +830,7 @@ repeat:
+       on = kn->attr.open;
+       if (on) {
+-              atomic_inc(&on->event);
++              atomic_inc_unchecked(&on->event);
+               wake_up_interruptible(&on->poll);
+       }
+diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
+index d3e40db..a300f9c 100644
+--- a/fs/lockd/clnt4xdr.c
++++ b/fs/lockd/clnt4xdr.c
+@@ -379,10 +379,11 @@ static void encode_nlm4_lock(struct xdr_stream *xdr,
+  *            struct nlm4_lock alock;
+  *    };
+  */
+-static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
++static void nlm4_xdr_enc_testargs(void *req,
+                                 struct xdr_stream *xdr,
+-                                const struct nlm_args *args)
++                                void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -400,10 +401,11 @@ static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
+  *            int state;
+  *    };
+  */
+-static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
++static void nlm4_xdr_enc_lockargs(void *req,
+                                 struct xdr_stream *xdr,
+-                                const struct nlm_args *args)
++                                void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -422,10 +424,11 @@ static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
+  *            struct nlm4_lock alock;
+  *    };
+  */
+-static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
++static void nlm4_xdr_enc_cancargs(void *req,
+                                 struct xdr_stream *xdr,
+-                                const struct nlm_args *args)
++                                void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -440,10 +443,11 @@ static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
+  *            struct nlm4_lock alock;
+  *    };
+  */
+-static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
++static void nlm4_xdr_enc_unlockargs(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nlm_args *args)
++                                  void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -456,10 +460,12 @@ static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
+  *            nlm4_stat stat;
+  *    };
+  */
+-static void nlm4_xdr_enc_res(struct rpc_rqst *req,
++static void nlm4_xdr_enc_res(void *req,
+                            struct xdr_stream *xdr,
+-                           const struct nlm_res *result)
++                           void *_result)
+ {
++      const struct nlm_res *result = _result;
++
+       encode_cookie(xdr, &result->cookie);
+       encode_nlm4_stat(xdr, result->status);
+ }
+@@ -477,10 +483,12 @@ static void nlm4_xdr_enc_res(struct rpc_rqst *req,
+  *            nlm4_testrply test_stat;
+  *    };
+  */
+-static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
++static void nlm4_xdr_enc_testres(void *req,
+                                struct xdr_stream *xdr,
+-                               const struct nlm_res *result)
++                               void *_result)
+ {
++      const struct nlm_res *result = _result;
++
+       encode_cookie(xdr, &result->cookie);
+       encode_nlm4_stat(xdr, result->status);
+       if (result->status == nlm_lck_denied)
+@@ -523,10 +531,11 @@ out:
+       return error;
+ }
+-static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
++static int nlm4_xdr_dec_testres(void *req,
+                               struct xdr_stream *xdr,
+-                              struct nlm_res *result)
++                              void *_result)
+ {
++      struct nlm_res *result = _result;
+       int error;
+       error = decode_cookie(xdr, &result->cookie);
+@@ -543,10 +552,11 @@ out:
+  *            nlm4_stat stat;
+  *    };
+  */
+-static int nlm4_xdr_dec_res(struct rpc_rqst *req,
++static int nlm4_xdr_dec_res(void *req,
+                           struct xdr_stream *xdr,
+-                          struct nlm_res *result)
++                          void *_result)
+ {
++      struct nlm_res *result = _result;
+       int error;
+       error = decode_cookie(xdr, &result->cookie);
+@@ -566,8 +576,8 @@ out:
+ #define PROC(proc, argtype, restype)                                  \
+ [NLMPROC_##proc] = {                                                  \
+       .p_proc      = NLMPROC_##proc,                                  \
+-      .p_encode    = (kxdreproc_t)nlm4_xdr_enc_##argtype,             \
+-      .p_decode    = (kxdrdproc_t)nlm4_xdr_dec_##restype,             \
++      .p_encode    = nlm4_xdr_enc_##argtype,                          \
++      .p_decode    = nlm4_xdr_dec_##restype,                          \
+       .p_arglen    = NLM4_##argtype##_sz,                             \
+       .p_replen    = NLM4_##restype##_sz,                             \
+       .p_statidx   = NLMPROC_##proc,                                  \
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 1129520..356aeca 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
+ /*
+  * Cookie counter for NLM requests
+  */
+-static atomic_t       nlm_cookie = ATOMIC_INIT(0x1234);
++static atomic_unchecked_t     nlm_cookie = ATOMIC_INIT(0x1234);
+ void nlmclnt_next_cookie(struct nlm_cookie *c)
+ {
+-      u32     cookie = atomic_inc_return(&nlm_cookie);
++      u32     cookie = atomic_inc_return_unchecked(&nlm_cookie);
+       memcpy(c->data, &cookie, 4);
+       c->len=4;
+diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
+index 3e9f787..c2177b8 100644
+--- a/fs/lockd/clntxdr.c
++++ b/fs/lockd/clntxdr.c
+@@ -372,10 +372,11 @@ static void encode_nlm_lock(struct xdr_stream *xdr,
+  *            struct nlm_lock alock;
+  *    };
+  */
+-static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
++static void nlm_xdr_enc_testargs(void *req,
+                                struct xdr_stream *xdr,
+-                               const struct nlm_args *args)
++                               void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -393,10 +394,11 @@ static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
+  *            int state;
+  *    };
+  */
+-static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
++static void nlm_xdr_enc_lockargs(void *req,
+                                struct xdr_stream *xdr,
+-                               const struct nlm_args *args)
++                               void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -415,10 +417,11 @@ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
+  *            struct nlm_lock alock;
+  *    };
+  */
+-static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
++static void nlm_xdr_enc_cancargs(void *req,
+                                struct xdr_stream *xdr,
+-                               const struct nlm_args *args)
++                               void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -433,10 +436,11 @@ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
+  *            struct nlm_lock alock;
+  *    };
+  */
+-static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
++static void nlm_xdr_enc_unlockargs(void *req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nlm_args *args)
++                                 void *_args)
+ {
++      const struct nlm_args *args = _args;
+       const struct nlm_lock *lock = &args->lock;
+       encode_cookie(xdr, &args->cookie);
+@@ -449,10 +453,11 @@ static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
+  *            nlm_stat stat;
+  *    };
+  */
+-static void nlm_xdr_enc_res(struct rpc_rqst *req,
++static void nlm_xdr_enc_res(void *req,
+                           struct xdr_stream *xdr,
+-                          const struct nlm_res *result)
++                          void *_result)
+ {
++      const struct nlm_res *result = _result;
+       encode_cookie(xdr, &result->cookie);
+       encode_nlm_stat(xdr, result->status);
+ }
+@@ -477,10 +482,11 @@ static void encode_nlm_testrply(struct xdr_stream *xdr,
+               encode_nlm_holder(xdr, result);
+ }
+-static void nlm_xdr_enc_testres(struct rpc_rqst *req,
++static void nlm_xdr_enc_testres(void *req,
+                               struct xdr_stream *xdr,
+-                              const struct nlm_res *result)
++                              void *_result)
+ {
++      const struct nlm_res *result = _result;
+       encode_cookie(xdr, &result->cookie);
+       encode_nlm_stat(xdr, result->status);
+       encode_nlm_testrply(xdr, result);
+@@ -521,11 +527,12 @@ out:
+       return error;
+ }
+-static int nlm_xdr_dec_testres(struct rpc_rqst *req,
++static int nlm_xdr_dec_testres(void *req,
+                              struct xdr_stream *xdr,
+-                             struct nlm_res *result)
++                             void *_result)
+ {
+       int error;
++      struct nlm_res *result = _result;
+       error = decode_cookie(xdr, &result->cookie);
+       if (unlikely(error))
+@@ -541,11 +548,12 @@ out:
+  *            nlm_stat stat;
+  *    };
+  */
+-static int nlm_xdr_dec_res(struct rpc_rqst *req,
++static int nlm_xdr_dec_res(void *req,
+                          struct xdr_stream *xdr,
+-                         struct nlm_res *result)
++                         void *_result)
+ {
+       int error;
++      struct nlm_res *result = _result;
+       error = decode_cookie(xdr, &result->cookie);
+       if (unlikely(error))
+@@ -564,8 +572,8 @@ out:
+ #define PROC(proc, argtype, restype)  \
+ [NLMPROC_##proc] = {                                                  \
+       .p_proc      = NLMPROC_##proc,                                  \
+-      .p_encode    = (kxdreproc_t)nlm_xdr_enc_##argtype,              \
+-      .p_decode    = (kxdrdproc_t)nlm_xdr_dec_##restype,              \
++      .p_encode    = nlm_xdr_enc_##argtype,                           \
++      .p_decode    = nlm_xdr_dec_##restype,                           \
+       .p_arglen    = NLM_##argtype##_sz,                              \
+       .p_replen    = NLM_##restype##_sz,                              \
+       .p_statidx   = NLMPROC_##proc,                                  \
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 19166d4..c841d52 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -475,23 +475,22 @@ static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
+       xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+ }
+-static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                          const struct nsm_args *argp)
++static void nsm_xdr_enc_mon(void *req, struct xdr_stream *xdr, void *argp)
+ {
+       encode_mon_id(xdr, argp);
+       encode_priv(xdr, argp);
+ }
+-static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                            const struct nsm_args *argp)
++static void nsm_xdr_enc_unmon(void *req, struct xdr_stream *xdr, void *argp)
+ {
+       encode_mon_id(xdr, argp);
+ }
+-static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
++static int nsm_xdr_dec_stat_res(void *rqstp,
+                               struct xdr_stream *xdr,
+-                              struct nsm_res *resp)
++                              void *_resp)
+ {
++      struct nsm_res *resp = _resp;
+       __be32 *p;
+       p = xdr_inline_decode(xdr, 4 + 4);
+@@ -505,10 +504,11 @@ static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
+       return 0;
+ }
+-static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
++static int nsm_xdr_dec_stat(void *rqstp,
+                           struct xdr_stream *xdr,
+-                          struct nsm_res *resp)
++                          void *_resp)
+ {
++      struct nsm_res *resp = _resp;
+       __be32 *p;
+       p = xdr_inline_decode(xdr, 4);
+@@ -532,8 +532,8 @@ static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
+ static struct rpc_procinfo    nsm_procedures[] = {
+ [NSMPROC_MON] = {
+               .p_proc         = NSMPROC_MON,
+-              .p_encode       = (kxdreproc_t)nsm_xdr_enc_mon,
+-              .p_decode       = (kxdrdproc_t)nsm_xdr_dec_stat_res,
++              .p_encode       = nsm_xdr_enc_mon,
++              .p_decode       = nsm_xdr_dec_stat_res,
+               .p_arglen       = SM_mon_sz,
+               .p_replen       = SM_monres_sz,
+               .p_statidx      = NSMPROC_MON,
+@@ -541,8 +541,8 @@ static struct rpc_procinfo nsm_procedures[] = {
+       },
+ [NSMPROC_UNMON] = {
+               .p_proc         = NSMPROC_UNMON,
+-              .p_encode       = (kxdreproc_t)nsm_xdr_enc_unmon,
+-              .p_decode       = (kxdrdproc_t)nsm_xdr_dec_stat,
++              .p_encode       = nsm_xdr_enc_unmon,
++              .p_decode       = nsm_xdr_dec_stat,
+               .p_arglen       = SM_mon_id_sz,
+               .p_replen       = SM_unmonres_sz,
+               .p_statidx      = NSMPROC_UNMON,
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index fc4084e..25d725d 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -598,7 +598,7 @@ static struct ctl_table nlm_sysctl_root[] = {
+  */
+ #define param_set_min_max(name, type, which_strtol, min, max)         \
+-static int param_set_##name(const char *val, struct kernel_param *kp) \
++static int param_set_##name(const char *val, const struct kernel_param *kp)\
+ {                                                                     \
+       char *endp;                                                     \
+       __typeof__(type) num = which_strtol(val, &endp, 0);             \
+diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
+index 09c576f..89b4d3d 100644
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -72,9 +72,10 @@ nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+  * TEST: Check for conflicting lock
+  */
+ static __be32
+-nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                       struct nlm_res  *resp)
++nlm4svc_proc_test(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+       __be32 rc = rpc_success;
+@@ -99,9 +100,10 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+ }
+ static __be32
+-nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                       struct nlm_res  *resp)
++nlm4svc_proc_lock(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+       __be32 rc = rpc_success;
+@@ -141,9 +143,10 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+ }
+ static __be32
+-nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                         struct nlm_res  *resp)
++nlm4svc_proc_cancel(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+@@ -174,9 +177,10 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * UNLOCK: release a lock
+  */
+ static __be32
+-nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                         struct nlm_res  *resp)
++nlm4svc_proc_unlock(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+@@ -208,9 +212,11 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * was granted
+  */
+ static __be32
+-nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                          struct nlm_res  *resp)
++nlm4svc_proc_granted(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
++
+       resp->cookie = argp->cookie;
+       dprintk("lockd: GRANTED       called\n");
+@@ -244,7 +250,7 @@ static const struct rpc_call_ops nlm4svc_callback_ops = {
+  * doesn't break any clients.
+  */
+ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
+-              __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res  *))
++              __be32 (*func)(struct svc_rqst *, void *, void *))
+ {
+       struct nlm_host *host;
+       struct nlm_rqst *call;
+@@ -273,35 +279,35 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
+       return rpc_success;
+ }
+-static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
++static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp, void *argp,
+                                            void            *resp)
+ {
+       dprintk("lockd: TEST_MSG      called\n");
+       return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test);
+ }
+-static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
++static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, void *argp,
+                                            void            *resp)
+ {
+       dprintk("lockd: LOCK_MSG      called\n");
+       return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock);
+ }
+-static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
++static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, void *argp,
+                                              void            *resp)
+ {
+       dprintk("lockd: CANCEL_MSG    called\n");
+       return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel);
+ }
+-static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
++static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, void *argp,
+                                                void            *resp)
+ {
+       dprintk("lockd: UNLOCK_MSG    called\n");
+       return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock);
+ }
+-static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
++static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, void *argp,
+                                                 void            *resp)
+ {
+       dprintk("lockd: GRANTED_MSG   called\n");
+@@ -312,9 +318,10 @@ static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *
+  * SHARE: create a DOS share or alter existing share.
+  */
+ static __be32
+-nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                        struct nlm_res  *resp)
++nlm4svc_proc_share(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+@@ -345,9 +352,10 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * UNSHARE: Release a DOS share.
+  */
+ static __be32
+-nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                          struct nlm_res  *resp)
++nlm4svc_proc_unshare(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res  *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+@@ -378,9 +386,10 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * NM_LOCK: Create an unmonitored lock
+  */
+ static __be32
+-nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                          struct nlm_res  *resp)
++nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, void *_argp, void *resp)
+ {
++      struct nlm_args *argp = _argp;
++
+       dprintk("lockd: NM_LOCK       called\n");
+       argp->monitor = 0;              /* just clean the monitor flag */
+@@ -391,8 +400,7 @@ nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * FREE_ALL: Release all locks and shares held by client
+  */
+ static __be32
+-nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                           void            *resp)
++nlm4svc_proc_free_all(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       struct nlm_host *host;
+@@ -409,7 +417,7 @@ nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * SM_NOTIFY: private callback from statd (not part of official NLM proto)
+  */
+ static __be32
+-nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
++nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, void *argp,
+                                             void              *resp)
+ {
+       dprintk("lockd: SM_NOTIFY     called\n");
+@@ -429,9 +437,10 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+  * client sent a GRANTED_RES, let's remove the associated block
+  */
+ static __be32
+-nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+-                                                void            *resp)
++nlm4svc_proc_granted_res(struct svc_rqst *rqstp, void *_argp, void *resp)
+ {
++      struct nlm_res *argp = _argp;
++
+         if (!nlmsvc_ops)
+                 return rpc_success;
+@@ -463,9 +472,9 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+ struct nlm_void                       { int dummy; };
+ #define PROC(name, xargt, xrest, argt, rest, respsize)        \
+- { .pc_func   = (svc_procfunc) nlm4svc_proc_##name,   \
+-   .pc_decode = (kxdrproc_t) nlm4svc_decode_##xargt,  \
+-   .pc_encode = (kxdrproc_t) nlm4svc_encode_##xrest,  \
++ { .pc_func   = nlm4svc_proc_##name,                  \
++   .pc_decode = nlm4svc_decode_##xargt,               \
++   .pc_encode = nlm4svc_encode_##xrest,               \
+    .pc_release        = NULL,                                 \
+    .pc_argsize        = sizeof(struct nlm_##argt),            \
+    .pc_ressize        = sizeof(struct nlm_##rest),            \
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index fb26b9f..a6d5582 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -102,9 +102,10 @@ nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+  * TEST: Check for conflicting lock
+  */
+ static __be32
+-nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                       struct nlm_res  *resp)
++nlmsvc_proc_test(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+       __be32 rc = rpc_success;
+@@ -130,9 +131,10 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
+ }
+ static __be32
+-nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                       struct nlm_res  *resp)
++nlmsvc_proc_lock(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+       __be32 rc = rpc_success;
+@@ -172,9 +174,10 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+ }
+ static __be32
+-nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                         struct nlm_res  *resp)
++nlmsvc_proc_cancel(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+       struct net *net = SVC_NET(rqstp);
+@@ -206,9 +209,10 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * UNLOCK: release a lock
+  */
+ static __be32
+-nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                         struct nlm_res  *resp)
++nlmsvc_proc_unlock(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+       struct net *net = SVC_NET(rqstp);
+@@ -241,9 +245,11 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * was granted
+  */
+ static __be32
+-nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                          struct nlm_res  *resp)
++nlmsvc_proc_granted(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
++
+       resp->cookie = argp->cookie;
+       dprintk("lockd: GRANTED       called\n");
+@@ -285,7 +291,7 @@ static const struct rpc_call_ops nlmsvc_callback_ops = {
+  * doesn't break any clients.
+  */
+ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
+-              __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res  *))
++              __be32 (*func)(struct svc_rqst *, void *, void *))
+ {
+       struct nlm_host *host;
+       struct nlm_rqst *call;
+@@ -314,38 +320,33 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
+       return rpc_success;
+ }
+-static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                           void            *resp)
++static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp,void *argp, void *resp)
+ {
+       dprintk("lockd: TEST_MSG      called\n");
+       return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test);
+ }
+-static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                           void            *resp)
++static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       dprintk("lockd: LOCK_MSG      called\n");
+       return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock);
+ }
+-static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                             void            *resp)
++static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       dprintk("lockd: CANCEL_MSG    called\n");
+       return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel);
+ }
+ static __be32
+-nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                               void            *resp)
++nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       dprintk("lockd: UNLOCK_MSG    called\n");
+       return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock);
+ }
+ static __be32
+-nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                                void            *resp)
++nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       dprintk("lockd: GRANTED_MSG   called\n");
+       return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted);
+@@ -355,9 +356,10 @@ nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * SHARE: create a DOS share or alter existing share.
+  */
+ static __be32
+-nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                        struct nlm_res  *resp)
++nlmsvc_proc_share(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+@@ -388,9 +390,10 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * UNSHARE: Release a DOS share.
+  */
+ static __be32
+-nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                          struct nlm_res  *resp)
++nlmsvc_proc_unshare(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nlm_args *argp = _argp;
++      struct nlm_res *resp = _resp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+@@ -421,9 +424,10 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * NM_LOCK: Create an unmonitored lock
+  */
+ static __be32
+-nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                          struct nlm_res  *resp)
++nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, void *_argp, void *resp)
+ {
++      struct nlm_args *argp = _argp;
++
+       dprintk("lockd: NM_LOCK       called\n");
+       argp->monitor = 0;              /* just clean the monitor flag */
+@@ -434,8 +438,7 @@ nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * FREE_ALL: Release all locks and shares held by client
+  */
+ static __be32
+-nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+-                                           void            *resp)
++nlmsvc_proc_free_all(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       struct nlm_host *host;
+@@ -452,8 +455,7 @@ nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
+  * SM_NOTIFY: private callback from statd (not part of official NLM proto)
+  */
+ static __be32
+-nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+-                                            void              *resp)
++nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, void *argp, void *resp)
+ {
+       dprintk("lockd: SM_NOTIFY     called\n");
+@@ -472,9 +474,10 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+  * client sent a GRANTED_RES, let's remove the associated block
+  */
+ static __be32
+-nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+-                                                void            *resp)
++nlmsvc_proc_granted_res(struct svc_rqst *rqstp, void *_argp, void *resp)
+ {
++      struct nlm_res *argp = _argp;
++
+       if (!nlmsvc_ops)
+               return rpc_success;
+@@ -505,9 +508,9 @@ nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res  *argp,
+ struct nlm_void                       { int dummy; };
+ #define PROC(name, xargt, xrest, argt, rest, respsize)        \
+- { .pc_func   = (svc_procfunc) nlmsvc_proc_##name,    \
+-   .pc_decode = (kxdrproc_t) nlmsvc_decode_##xargt,   \
+-   .pc_encode = (kxdrproc_t) nlmsvc_encode_##xrest,   \
++ { .pc_func   = nlmsvc_proc_##name,                   \
++   .pc_decode = nlmsvc_decode_##xargt,                \
++   .pc_encode = nlmsvc_encode_##xrest,                \
+    .pc_release        = NULL,                                 \
+    .pc_argsize        = sizeof(struct nlm_##argt),            \
+    .pc_ressize        = sizeof(struct nlm_##rest),            \
+diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
+index 5b651da..cfe0944 100644
+--- a/fs/lockd/xdr.c
++++ b/fs/lockd/xdr.c
+@@ -182,8 +182,9 @@ nlm_encode_testres(__be32 *p, struct nlm_res *resp)
+  * First, the server side XDR functions
+  */
+ int
+-nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlmsvc_decode_testargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       u32     exclusive;
+       if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+@@ -199,16 +200,19 @@ nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlmsvc_encode_testres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm_encode_testres(p, resp)))
+               return 0;
+       return xdr_ressize_check(rqstp, p);
+ }
+ int
+-nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlmsvc_decode_lockargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       u32     exclusive;
+       if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+@@ -227,8 +231,9 @@ nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlmsvc_decode_cancargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       u32     exclusive;
+       if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+@@ -243,8 +248,10 @@ nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlmsvc_decode_unlockargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
++
+       if (!(p = nlm_decode_cookie(p, &argp->cookie))
+        || !(p = nlm_decode_lock(p, &argp->lock)))
+               return 0;
+@@ -253,8 +260,10 @@ nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlmsvc_decode_shareargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
++
+       struct nlm_lock *lock = &argp->lock;
+       memset(lock, 0, sizeof(*lock));
+@@ -274,8 +283,10 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlmsvc_encode_shareres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+               return 0;
+       *p++ = resp->status;
+@@ -284,8 +295,10 @@ nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlmsvc_encode_res(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+               return 0;
+       *p++ = resp->status;
+@@ -293,8 +306,9 @@ nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
++nlmsvc_decode_notify(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nlm_args *argp = _argp;
+       struct nlm_lock *lock = &argp->lock;
+       if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+@@ -305,8 +319,10 @@ nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
+ }
+ int
+-nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
++nlmsvc_decode_reboot(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nlm_reboot *argp = _argp;
++
+       if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+               return 0;
+       argp->state = ntohl(*p++);
+@@ -316,8 +332,10 @@ nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
+ }
+ int
+-nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlmsvc_decode_res(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm_decode_cookie(p, &resp->cookie)))
+               return 0;
+       resp->status = *p++;
+@@ -325,13 +343,13 @@ nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nlmsvc_decode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_argsize_check(rqstp, p);
+ }
+ int
+-nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nlmsvc_encode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_ressize_check(rqstp, p);
+ }
+diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
+index dfa4789..be443bd 100644
+--- a/fs/lockd/xdr4.c
++++ b/fs/lockd/xdr4.c
+@@ -179,8 +179,9 @@ nlm4_encode_testres(__be32 *p, struct nlm_res *resp)
+  * First, the server side XDR functions
+  */
+ int
+-nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlm4svc_decode_testargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       u32     exclusive;
+       if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+@@ -196,7 +197,7 @@ nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlm4svc_encode_testres(void *rqstp, __be32 *p, void *resp)
+ {
+       if (!(p = nlm4_encode_testres(p, resp)))
+               return 0;
+@@ -204,8 +205,9 @@ nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlm4svc_decode_lockargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       u32     exclusive;
+       if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+@@ -224,8 +226,9 @@ nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlm4svc_decode_cancargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       u32     exclusive;
+       if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+@@ -240,8 +243,10 @@ nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlm4svc_decode_unlockargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
++
+       if (!(p = nlm4_decode_cookie(p, &argp->cookie))
+        || !(p = nlm4_decode_lock(p, &argp->lock)))
+               return 0;
+@@ -250,8 +255,9 @@ nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
++nlm4svc_decode_shareargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      nlm_args *argp = _argp;
+       struct nlm_lock *lock = &argp->lock;
+       memset(lock, 0, sizeof(*lock));
+@@ -271,8 +277,10 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
+ }
+ int
+-nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlm4svc_encode_shareres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+               return 0;
+       *p++ = resp->status;
+@@ -281,8 +289,10 @@ nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlm4svc_encode_res(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+               return 0;
+       *p++ = resp->status;
+@@ -290,8 +300,9 @@ nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
++nlm4svc_decode_notify(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nlm_args *argp = _argp;
+       struct nlm_lock *lock = &argp->lock;
+       if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+@@ -302,8 +313,10 @@ nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
+ }
+ int
+-nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
++nlm4svc_decode_reboot(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nlm_reboot *argp = _argp;
++
+       if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+               return 0;
+       argp->state = ntohl(*p++);
+@@ -313,8 +326,10 @@ nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp
+ }
+ int
+-nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
++nlm4svc_decode_res(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nlm_res *resp = _resp;
++
+       if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
+               return 0;
+       resp->status = *p++;
+@@ -322,13 +337,13 @@ nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
+ }
+ int
+-nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nlm4svc_decode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_argsize_check(rqstp, p);
+ }
+ int
+-nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nlm4svc_encode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_ressize_check(rqstp, p);
+ }
+diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
+index a8329cc..b3d18fb 100644
+--- a/fs/logfs/dev_bdev.c
++++ b/fs/logfs/dev_bdev.c
+@@ -34,9 +34,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int op)
+       return submit_bio_wait(&bio);
+ }
+-static int bdev_readpage(void *_sb, struct page *page)
++static int bdev_readpage(struct super_block *sb, struct page *page)
+ {
+-      struct super_block *sb = _sb;
+       struct block_device *bdev = logfs_super(sb)->s_bdev;
+       int err;
+@@ -52,6 +51,11 @@ static int bdev_readpage(void *_sb, struct page *page)
+       return err;
+ }
++static int bdev_filler(struct file *file, struct page *page)
++{
++      return bdev_readpage((struct super_block *)file, page);
++}
++
+ static DECLARE_WAIT_QUEUE_HEAD(wq);
+ static void writeseg_end_io(struct bio *bio)
+@@ -251,7 +255,7 @@ static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
+ {
+       struct logfs_super *super = logfs_super(sb);
+       struct address_space *mapping = super->s_mapping_inode->i_mapping;
+-      filler_t *filler = bdev_readpage;
++      filler_t *filler = bdev_filler;
+       *ofs = 0;
+       return read_cache_page(mapping, 0, filler, sb);
+@@ -261,7 +265,7 @@ static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
+ {
+       struct logfs_super *super = logfs_super(sb);
+       struct address_space *mapping = super->s_mapping_inode->i_mapping;
+-      filler_t *filler = bdev_readpage;
++      filler_t *filler = bdev_filler;
+       u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
+       pgoff_t index = pos >> PAGE_SHIFT;
+@@ -292,6 +296,7 @@ static const struct logfs_device_ops bd_devops = {
+       .find_last_sb   = bdev_find_last_sb,
+       .write_sb       = bdev_write_sb,
+       .readpage       = bdev_readpage,
++      .filler         = bdev_filler,
+       .writeseg       = bdev_writeseg,
+       .erase          = bdev_erase,
+       .can_write_buf  = bdev_can_write_buf,
+diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
+index b76a62b..317c6ff 100644
+--- a/fs/logfs/dev_mtd.c
++++ b/fs/logfs/dev_mtd.c
+@@ -122,9 +122,8 @@ static void logfs_mtd_sync(struct super_block *sb)
+       mtd_sync(mtd);
+ }
+-static int logfs_mtd_readpage(void *_sb, struct page *page)
++static int logfs_mtd_readpage(struct super_block *sb, struct page *page)
+ {
+-      struct super_block *sb = _sb;
+       int err;
+       err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+@@ -145,11 +144,16 @@ static int logfs_mtd_readpage(void *_sb, struct page *page)
+       return err;
+ }
++static int logfs_mtd_filler(struct file *file, struct page *page)
++{
++      return logfs_mtd_readpage((struct super_block *)file, page);
++}
++
+ static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
+ {
+       struct logfs_super *super = logfs_super(sb);
+       struct address_space *mapping = super->s_mapping_inode->i_mapping;
+-      filler_t *filler = logfs_mtd_readpage;
++      filler_t *filler = logfs_mtd_filler;
+       struct mtd_info *mtd = super->s_mtd;
+       *ofs = 0;
+@@ -166,7 +170,7 @@ static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
+ {
+       struct logfs_super *super = logfs_super(sb);
+       struct address_space *mapping = super->s_mapping_inode->i_mapping;
+-      filler_t *filler = logfs_mtd_readpage;
++      filler_t *filler = logfs_mtd_filler;
+       struct mtd_info *mtd = super->s_mtd;
+       *ofs = mtd->size - mtd->erasesize;
+@@ -254,6 +258,7 @@ static const struct logfs_device_ops mtd_devops = {
+       .find_first_sb  = logfs_mtd_find_first_sb,
+       .find_last_sb   = logfs_mtd_find_last_sb,
+       .readpage       = logfs_mtd_readpage,
++      .filler         = logfs_mtd_filler,
+       .writeseg       = logfs_mtd_writeseg,
+       .erase          = logfs_mtd_erase,
+       .can_write_buf  = logfs_mtd_can_write_buf,
+diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
+index 9568064..e188a46 100644
+--- a/fs/logfs/dir.c
++++ b/fs/logfs/dir.c
+@@ -174,7 +174,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
+               if (!logfs_exist_block(dir, index))
+                       continue;
+               page = read_cache_page(dir->i_mapping, index,
+-                              (filler_t *)logfs_readpage, NULL);
++                              logfs_readpage, NULL);
+               if (IS_ERR(page))
+                       return page;
+               dd = kmap_atomic(page);
+@@ -306,7 +306,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
+                       continue;
+               }
+               page = read_cache_page(dir->i_mapping, pos,
+-                              (filler_t *)logfs_readpage, NULL);
++                              logfs_readpage, NULL);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+               dd = kmap(page);
+diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
+index 27d040e..8959149 100644
+--- a/fs/logfs/logfs.h
++++ b/fs/logfs/logfs.h
+@@ -151,7 +151,8 @@ struct logfs_device_ops {
+       struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs);
+       struct page *(*find_last_sb)(struct super_block *sb, u64 *ofs);
+       int (*write_sb)(struct super_block *sb, struct page *page);
+-      int (*readpage)(void *_sb, struct page *page);
++      int (*readpage)(struct super_block *sb, struct page *page);
++      int (*filler)(struct file *file, struct page *page);
+       void (*writeseg)(struct super_block *sb, u64 ofs, size_t len);
+       int (*erase)(struct super_block *sb, loff_t ofs, size_t len,
+                       int ensure_write);
+@@ -617,8 +618,6 @@ static inline int logfs_buf_recover(struct logfs_area *area, u64 ofs,
+ }
+ /* super.c */
+-struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index);
+-void emergency_read_end(struct page *page);
+ void logfs_crash_dump(struct super_block *sb);
+ int logfs_statfs(struct dentry *dentry, struct kstatfs *stats);
+ int logfs_check_ds(struct logfs_disk_super *ds);
+diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
+index 3fb8c6d..83a5133 100644
+--- a/fs/logfs/readwrite.c
++++ b/fs/logfs/readwrite.c
+@@ -1963,7 +1963,7 @@ int logfs_read_inode(struct inode *inode)
+               return -ENODATA;
+       page = read_cache_page(master_inode->i_mapping, ino,
+-                      (filler_t *)logfs_readpage, NULL);
++                      logfs_readpage, NULL);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
+index 1efd605..d712407b 100644
+--- a/fs/logfs/segment.c
++++ b/fs/logfs/segment.c
+@@ -54,7 +54,7 @@ static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
+ {
+       struct logfs_super *super = logfs_super(sb);
+       struct address_space *mapping = super->s_mapping_inode->i_mapping;
+-      filler_t *filler = super->s_devops->readpage;
++      filler_t *filler = super->s_devops->filler;
+       struct page *page;
+       BUG_ON(mapping_gfp_constraint(mapping, __GFP_FS));
+diff --git a/fs/logfs/super.c b/fs/logfs/super.c
+index 5751082..7619dac 100644
+--- a/fs/logfs/super.c
++++ b/fs/logfs/super.c
+@@ -18,39 +18,6 @@
+ #include <linux/statfs.h>
+ #include <linux/buffer_head.h>
+-static DEFINE_MUTEX(emergency_mutex);
+-static struct page *emergency_page;
+-
+-struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index)
+-{
+-      filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+-      struct page *page;
+-      int err;
+-
+-      page = read_cache_page(mapping, index, filler, NULL);
+-      if (page)
+-              return page;
+-
+-      /* No more pages available, switch to emergency page */
+-      printk(KERN_INFO"Logfs: Using emergency page\n");
+-      mutex_lock(&emergency_mutex);
+-      err = filler(NULL, emergency_page);
+-      if (err) {
+-              mutex_unlock(&emergency_mutex);
+-              printk(KERN_EMERG"Logfs: Error reading emergency page\n");
+-              return ERR_PTR(err);
+-      }
+-      return emergency_page;
+-}
+-
+-void emergency_read_end(struct page *page)
+-{
+-      if (page == emergency_page)
+-              mutex_unlock(&emergency_mutex);
+-      else
+-              put_page(page);
+-}
+-
+ static void dump_segfile(struct super_block *sb)
+ {
+       struct logfs_super *super = logfs_super(sb);
+@@ -614,10 +581,6 @@ static int __init logfs_init(void)
+ {
+       int ret;
+-      emergency_page = alloc_pages(GFP_KERNEL, 0);
+-      if (!emergency_page)
+-              return -ENOMEM;
+-
+       ret = logfs_compr_init();
+       if (ret)
+               goto out1;
+@@ -633,7 +596,6 @@ static int __init logfs_init(void)
+ out2:
+       logfs_compr_exit();
+ out1:
+-      __free_pages(emergency_page, 0);
+       return ret;
+ }
+@@ -642,7 +604,6 @@ static void __exit logfs_exit(void)
+       unregister_filesystem(&logfs_fs_type);
+       logfs_destroy_inode_cache();
+       logfs_compr_exit();
+-      __free_pages(emergency_page, 0);
+ }
+ module_init(logfs_init);
+diff --git a/fs/mount.h b/fs/mount.h
+index 14db05d..687f6d8 100644
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -13,7 +13,7 @@ struct mnt_namespace {
+       u64                     seq;    /* Sequence number to prevent loops */
+       wait_queue_head_t poll;
+       u64 event;
+-};
++} __randomize_layout;
+ struct mnt_pcp {
+       int mnt_count;
+@@ -65,7 +65,7 @@ struct mount {
+       struct hlist_head mnt_pins;
+       struct fs_pin mnt_umount;
+       struct dentry *mnt_ex_mountpoint;
+-};
++} __randomize_layout;
+ #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
+diff --git a/fs/namei.c b/fs/namei.c
+index adb0414..82da447 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -338,17 +338,32 @@ int generic_permission(struct inode *inode, int mask)
+       if (ret != -EACCES)
+               return ret;
++#ifdef CONFIG_GRKERNSEC
++      /* we'll block if we have to log due to a denied capability use */
++      if (mask & MAY_NOT_BLOCK)
++              return -ECHILD;
++#endif
++
+       if (S_ISDIR(inode->i_mode)) {
+               /* DACs are overridable for directories */
+-              if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+-                      return 0;
+               if (!(mask & MAY_WRITE))
+-                      if (capable_wrt_inode_uidgid(inode,
+-                                                   CAP_DAC_READ_SEARCH))
++                      if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
++                          capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
+                               return 0;
++              if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
++                      return 0;
+               return -EACCES;
+       }
+       /*
++       * Searching includes executable on directories, else just read.
++       */
++      mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
++      if (mask == MAY_READ)
++              if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
++                  capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
++                      return 0;
++
++      /*
+        * Read/write DACs are always overridable.
+        * Executable DACs are overridable when there is
+        * at least one exec bit set.
+@@ -357,14 +372,6 @@ int generic_permission(struct inode *inode, int mask)
+               if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+                       return 0;
+-      /*
+-       * Searching includes executable on directories, else just read.
+-       */
+-      mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+-      if (mask == MAY_READ)
+-              if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
+-                      return 0;
+-
+       return -EACCES;
+ }
+ EXPORT_SYMBOL(generic_permission);
+@@ -524,12 +531,35 @@ struct nameidata {
+       struct inode    *link_inode;
+       unsigned        root_seq;
+       int             dfd;
+-};
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      struct path     *symlinkown_stack;
++      struct path     symlinkown_internal[EMBEDDED_LEVELS];
++      unsigned        symlinkown_depth;
++      int             symlinkown_enabled;
++#endif
++} __randomize_layout;
++
++static int gr_handle_nameidata_symlinkowner(const struct nameidata *nd, const struct inode *target)
++{
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      int i;
++
++      for (i = 0; i < nd->symlinkown_depth; i++) {
++              if (gr_handle_symlink_owner(&nd->symlinkown_stack[i], target))
++                      return -EACCES;
++      }
++#endif
++      return 0;
++}
+ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
+ {
+       struct nameidata *old = current->nameidata;
+       p->stack = p->internal;
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      p->symlinkown_stack = p->symlinkown_internal;
++      p->symlinkown_enabled = -1;
++#endif
+       p->dfd = dfd;
+       p->name = name;
+       p->total_link_count = old ? old->total_link_count : 0;
+@@ -546,6 +576,10 @@ static void restore_nameidata(void)
+               old->total_link_count = now->total_link_count;
+       if (now->stack != now->internal)
+               kfree(now->stack);
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      if (now->symlinkown_stack != now->symlinkown_internal)
++              kfree(now->symlinkown_stack);
++#endif
+ }
+ static int __nd_alloc_stack(struct nameidata *nd)
+@@ -565,6 +599,7 @@ static int __nd_alloc_stack(struct nameidata *nd)
+       }
+       memcpy(p, nd->internal, sizeof(nd->internal));
+       nd->stack = p;
++
+       return 0;
+ }
+@@ -586,8 +621,32 @@ static bool path_connected(const struct path *path)
+       return is_subdir(path->dentry, mnt->mnt_root);
+ }
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++static int nd_alloc_symlinkown_stack(struct nameidata *nd)
++{
++      struct path *p;
++
++      if (likely(nd->symlinkown_depth != EMBEDDED_LEVELS))
++              return 0;
++      if (nd->symlinkown_stack != nd->symlinkown_internal)
++              return 0;
++
++      p = kmalloc(MAXSYMLINKS * sizeof(struct path), GFP_KERNEL);
++      if (unlikely(!p))
++              return -ENOMEM;
++      memcpy(p, nd->symlinkown_internal, sizeof(nd->symlinkown_internal));
++      nd->symlinkown_stack = p;
++      return 0;
++}
++#endif
++
+ static inline int nd_alloc_stack(struct nameidata *nd)
+ {
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      if (nd->flags & LOOKUP_RCU)
++              return -ECHILD;
++#endif
++
+       if (likely(nd->depth != EMBEDDED_LEVELS))
+               return 0;
+       if (likely(nd->stack != nd->internal))
+@@ -613,6 +672,14 @@ static void terminate_walk(struct nameidata *nd)
+               path_put(&nd->path);
+               for (i = 0; i < nd->depth; i++)
+                       path_put(&nd->stack[i].link);
++
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      /* we'll only ever set our values in ref-walk mode */
++              for (i = 0; i < nd->symlinkown_depth; i++)
++                      path_put(&nd->symlinkown_stack[i]);
++              nd->symlinkown_depth = 0;
++#endif
++
+               if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+                       path_put(&nd->root);
+                       nd->root.mnt = NULL;
+@@ -1026,6 +1093,9 @@ const char *get_link(struct nameidata *nd)
+       if (unlikely(error))
+               return ERR_PTR(error);
++      if (gr_handle_follow_link(dentry, last->link.mnt))
++              return ERR_PTR(-EACCES);
++
+       nd->last_type = LAST_BIND;
+       res = inode->i_link;
+       if (!res) {
+@@ -1717,6 +1787,23 @@ static int pick_link(struct nameidata *nd, struct path *link,
+               }
+       }
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      if (unlikely(nd->symlinkown_enabled == -1))
++              nd->symlinkown_enabled = gr_get_symlinkown_enabled();
++      if (nd->symlinkown_enabled && gr_is_global_nonroot(inode->i_uid)) {
++              struct path *symlinkownlast;
++              error = nd_alloc_symlinkown_stack(nd);
++              if (unlikely(error)) {
++                      path_put(link);
++                      return error;
++              }
++              symlinkownlast = nd->symlinkown_stack + nd->symlinkown_depth++;
++              symlinkownlast->dentry = link->dentry;
++              symlinkownlast->mnt = link->mnt;
++              path_get(symlinkownlast);
++      }
++#endif
++
+       last = nd->stack + nd->depth++;
+       last->link = *link;
+       clear_delayed_call(&last->done);
+@@ -1931,7 +2018,7 @@ u64 hashlen_string(const void *salt, const char *name)
+ {
+       unsigned long a = 0, x = 0, y = (unsigned long)salt;
+       unsigned long adata, mask, len;
+-      const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
++      static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       len = 0;
+       goto inside;
+@@ -2144,6 +2231,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
+       nd->last_type = LAST_ROOT; /* if there are only slashes... */
+       nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
+       nd->depth = 0;
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      nd->symlinkown_depth = 0;
++#endif
++
+       if (flags & LOOKUP_ROOT) {
+               struct dentry *root = nd->root.dentry;
+               struct inode *inode = root->d_inode;
+@@ -2275,6 +2366,14 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
+       if (!err)
+               err = complete_walk(nd);
++      if (!err && !(nd->flags & LOOKUP_PARENT)) {
++              if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
++                      err = -ENOENT;
++              if (!err)
++                      err = gr_chroot_pathat(nd->dfd, nd->path.dentry,
++                                              nd->path.mnt, nd->flags);
++      }
++
+       if (!err && nd->flags & LOOKUP_DIRECTORY)
+               if (!d_can_lookup(nd->path.dentry))
+                       err = -ENOTDIR;
+@@ -2323,6 +2422,14 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
+       err = link_path_walk(s, nd);
+       if (!err)
+               err = complete_walk(nd);
++
++      if (!err && gr_handle_nameidata_symlinkowner(nd, nd->inode))
++              err = -EACCES;
++
++      if (!err)
++              err = gr_chroot_pathat(nd->dfd, nd->path.dentry,
++                                      nd->path.mnt, nd->flags);
++
+       if (!err) {
+               *parent = nd->path;
+               nd->path.mnt = NULL;
+@@ -2940,6 +3047,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+       if (flag & O_NOATIME && !inode_owner_or_capable(inode))
+               return -EPERM;
++      if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
++              return -EPERM;
++      if (gr_handle_rawio(inode))
++              return -EPERM;
++      if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
++              return -EACCES;
++
+       return 0;
+ }
+@@ -3179,6 +3293,20 @@ no_open:
+       /* Negative dentry, just create the file */
+       if (!dentry->d_inode && (open_flag & O_CREAT)) {
++              error = gr_chroot_pathat(nd->dfd, dentry, nd->path.mnt, nd->flags);
++              if (error)
++                      goto out_dput;
++
++              if (gr_handle_nameidata_symlinkowner(nd, dir_inode)) {
++                      error = -EACCES;
++                      goto out_dput;
++              }
++
++              if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
++                      error = -EACCES;
++                      goto out_dput;
++              }
++
+               *opened |= FILE_CREATED;
+               audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+               if (!dir_inode->i_op->create) {
+@@ -3189,6 +3317,7 @@ no_open:
+                                               open_flag & O_EXCL);
+               if (error)
+                       goto out_dput;
++              gr_handle_create(dentry, nd->path.mnt);
+               fsnotify_create(dir_inode, dentry);
+       }
+       if (unlikely(create_error) && !dentry->d_inode) {
+@@ -3303,6 +3432,11 @@ static int do_last(struct nameidata *nd,
+               goto finish_open_created;
+       }
++      if (!gr_acl_handle_hidden_file(path.dentry, nd->path.mnt)) {
++              path_to_nameidata(&path, nd);
++              return -ENOENT;
++      }
++
+       /*
+        * If atomic_open() acquired write access it is dropped now due to
+        * possible mount and symlink following (this might be optimized away if
+@@ -3322,6 +3456,13 @@ static int do_last(struct nameidata *nd,
+               return -ENOENT;
+       }
++      /* only check if O_CREAT is specified, all other checks need to go
++         into may_open */
++      if (gr_handle_fifo(path.dentry, path.mnt, dir, open_flag, acc_mode)) {
++              path_to_nameidata(&path, nd);
++              return -EACCES;
++      }
++
+       /*
+        * create/update audit record if it already exists.
+        */
+@@ -3350,6 +3491,21 @@ finish_open:
+       error = complete_walk(nd);
+       if (error)
+               return error;
++
++      if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++              error = -ENOENT;
++              goto out;
++      }
++
++      error = gr_chroot_pathat(nd->dfd, nd->path.dentry, nd->path.mnt, nd->flags);
++      if (error)
++              goto out;
++
++      if (gr_handle_nameidata_symlinkowner(nd, nd->inode)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       audit_inode(nd->name, nd->path.dentry, 0);
+       error = -EISDIR;
+       if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
+@@ -3606,9 +3762,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+               goto unlock;
+       error = -EEXIST;
+-      if (d_is_positive(dentry))
++      if (d_is_positive(dentry)) {
++              if (!gr_acl_handle_hidden_file(dentry, path->mnt))
++                      error = -ENOENT;
+               goto fail;
+-
++      }
+       /*
+        * Special case - lookup gave negative, but... we had foo/bar/
+        * From the vfs_mknod() POV we just have a negative dentry -
+@@ -3662,6 +3820,20 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
+ }
+ EXPORT_SYMBOL(user_path_create);
++static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
++{
++      struct filename *tmp = getname(pathname);
++      struct dentry *res;
++      if (IS_ERR(tmp))
++              return ERR_CAST(tmp);
++      res = kern_path_create(dfd, tmp->name, path, lookup_flags);
++      if (IS_ERR(res))
++              putname(tmp);
++      else
++              *to = tmp;
++      return res;
++}
++
+ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+ {
+       int error = may_create(dir, dentry);
+@@ -3725,6 +3897,17 @@ retry:
+       if (!IS_POSIXACL(path.dentry->d_inode))
+               mode &= ~current_umask();
++
++      if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
++              error = -EPERM;
++              goto out;
++      }
++
++      if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       error = security_path_mknod(&path, dentry, mode, dev);
+       if (error)
+               goto out;
+@@ -3742,6 +3925,8 @@ retry:
+                       error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
+                       break;
+       }
++      if (!error)
++              gr_handle_create(dentry, path.mnt);
+ out:
+       done_path_create(&path, dentry);
+       if (retry_estale(error, lookup_flags)) {
+@@ -3796,9 +3981,16 @@ retry:
+       if (!IS_POSIXACL(path.dentry->d_inode))
+               mode &= ~current_umask();
++      if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
++              error = -EACCES;
++              goto out;
++      }
+       error = security_path_mkdir(&path, dentry, mode);
+       if (!error)
+               error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
++      if (!error)
++              gr_handle_create(dentry, path.mnt);
++out:
+       done_path_create(&path, dentry);
+       if (retry_estale(error, lookup_flags)) {
+               lookup_flags |= LOOKUP_REVAL;
+@@ -3859,6 +4051,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+       struct path path;
+       struct qstr last;
+       int type;
++      u64 saved_ino = 0;
++      dev_t saved_dev = 0;
+       unsigned int lookup_flags = 0;
+ retry:
+       name = user_path_parent(dfd, pathname,
+@@ -3891,10 +4085,20 @@ retry:
+               error = -ENOENT;
+               goto exit3;
+       }
++      saved_ino = gr_get_ino_from_dentry(dentry);
++      saved_dev = gr_get_dev_from_dentry(dentry);
++
++      if (!gr_acl_handle_rmdir(dentry, path.mnt)) {
++              error = -EACCES;
++              goto exit3;
++      }
++
+       error = security_path_rmdir(&path, dentry);
+       if (error)
+               goto exit3;
+       error = vfs_rmdir(path.dentry->d_inode, dentry);
++      if (!error && (saved_dev || saved_ino))
++              gr_handle_delete(saved_ino, saved_dev);
+ exit3:
+       dput(dentry);
+ exit2:
+@@ -3989,6 +4193,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+       int type;
+       struct inode *inode = NULL;
+       struct inode *delegated_inode = NULL;
++      u64 saved_ino = 0;
++      dev_t saved_dev = 0;
+       unsigned int lookup_flags = 0;
+ retry:
+       name = user_path_parent(dfd, pathname,
+@@ -4015,10 +4221,21 @@ retry_deleg:
+               if (d_is_negative(dentry))
+                       goto slashes;
+               ihold(inode);
++              if (inode->i_nlink <= 1) {
++                      saved_ino = gr_get_ino_from_dentry(dentry);
++                      saved_dev = gr_get_dev_from_dentry(dentry);
++              }
++              if (!gr_acl_handle_unlink(dentry, path.mnt)) {
++                      error = -EACCES;
++                      goto exit2;
++              }
++
+               error = security_path_unlink(&path, dentry);
+               if (error)
+                       goto exit2;
+               error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
++              if (!error && (saved_ino || saved_dev))
++                      gr_handle_delete(saved_ino, saved_dev);
+ exit2:
+               dput(dentry);
+       }
+@@ -4107,9 +4324,17 @@ retry:
+       if (IS_ERR(dentry))
+               goto out_putname;
++      if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       error = security_path_symlink(&path, dentry, from->name);
+       if (!error)
+               error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
++      if (!error)
++              gr_handle_create(dentry, path.mnt);
++out:
+       done_path_create(&path, dentry);
+       if (retry_estale(error, lookup_flags)) {
+               lookup_flags |= LOOKUP_REVAL;
+@@ -4220,6 +4445,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+       struct dentry *new_dentry;
+       struct path old_path, new_path;
+       struct inode *delegated_inode = NULL;
++      struct filename *to = NULL;
+       int how = 0;
+       int error;
+@@ -4243,7 +4469,7 @@ retry:
+       if (error)
+               return error;
+-      new_dentry = user_path_create(newdfd, newname, &new_path,
++      new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
+                                       (how & LOOKUP_REVAL));
+       error = PTR_ERR(new_dentry);
+       if (IS_ERR(new_dentry))
+@@ -4255,11 +4481,26 @@ retry:
+       error = may_linkat(&old_path);
+       if (unlikely(error))
+               goto out_dput;
++
++      if (gr_handle_hardlink(old_path.dentry, old_path.mnt, to)) {
++              error = -EACCES;
++              goto out_dput;
++      }
++
++      if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
++                              old_path.dentry, old_path.mnt, to)) {
++              error = -EACCES;
++              goto out_dput;
++      }
++
+       error = security_path_link(old_path.dentry, &new_path, new_dentry);
+       if (error)
+               goto out_dput;
+       error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
++      if (!error)
++              gr_handle_create(new_dentry, new_path.mnt);
+ out_dput:
++      putname(to);
+       done_path_create(&new_path, new_dentry);
+       if (delegated_inode) {
+               error = break_deleg_wait(&delegated_inode);
+@@ -4578,6 +4819,20 @@ retry_deleg:
+       if (new_dentry == trap)
+               goto exit5;
++      if (gr_bad_chroot_rename(old_dentry, old_path.mnt, new_dentry, new_path.mnt)) {
++              /* use EXDEV error to cause 'mv' to switch to an alternative
++               * method for usability
++               */
++              error = -EXDEV;
++              goto exit5;
++      }
++
++      error = gr_acl_handle_rename(new_dentry, new_path.dentry, new_path.mnt,
++                                   old_dentry, d_backing_inode(old_path.dentry), old_path.mnt,
++                                   to, flags);
++      if (error)
++              goto exit5;
++
+       error = security_path_rename(&old_path, old_dentry,
+                                    &new_path, new_dentry, flags);
+       if (error)
+@@ -4585,6 +4840,9 @@ retry_deleg:
+       error = vfs_rename(old_path.dentry->d_inode, old_dentry,
+                          new_path.dentry->d_inode, new_dentry,
+                          &delegated_inode, flags);
++      if (!error)
++              gr_handle_rename(d_backing_inode(old_path.dentry), d_backing_inode(new_path.dentry), old_dentry,
++                               new_dentry, old_path.mnt, d_is_positive(new_dentry) ? 1 : 0, flags);
+ exit5:
+       dput(new_dentry);
+ exit4:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7bb2cda..74b3e8f 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1516,6 +1516,9 @@ static int do_umount(struct mount *mnt, int flags)
+               if (!(sb->s_flags & MS_RDONLY))
+                       retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+               up_write(&sb->s_umount);
++
++              gr_log_remount(mnt->mnt_devname, retval);
++
+               return retval;
+       }
+@@ -1538,6 +1541,9 @@ static int do_umount(struct mount *mnt, int flags)
+       }
+       unlock_mount_hash();
+       namespace_unlock();
++
++      gr_log_unmount(mnt->mnt_devname, retval);
++
+       return retval;
+ }
+@@ -1601,7 +1607,7 @@ static inline bool may_mandlock(void)
+  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
+  */
+-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
++SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
+ {
+       struct path path;
+       struct mount *mnt;
+@@ -1646,7 +1652,7 @@ out:
+ /*
+  *    The 2.0 compatible umount. No flags.
+  */
+-SYSCALL_DEFINE1(oldumount, char __user *, name)
++SYSCALL_DEFINE1(oldumount, const char __user *, name)
+ {
+       return sys_umount(name, 0);
+ }
+@@ -2702,6 +2708,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+                  MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+                  MS_STRICTATIME);
++      if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
++              retval = -EPERM;
++              goto dput_out;
++      }
++
++      if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
++              retval = -EPERM;
++              goto dput_out;
++      }
++
+       if (flags & MS_REMOUNT)
+               retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+                                   data_page);
+@@ -2715,7 +2731,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+               retval = do_new_mount(&path, type_page, flags, mnt_flags,
+                                     dev_name, data_page);
+ dput_out:
++      gr_log_mount(dev_name, &path, retval);
++
+       path_put(&path);
++
+       return retval;
+ }
+@@ -2733,7 +2752,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+  * number incrementing at 10Ghz will take 12,427 years to wrap which
+  * is effectively never, so we can ignore the possibility.
+  */
+-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
++static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
+ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+ {
+@@ -2749,7 +2768,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+               return ERR_PTR(ret);
+       }
+       new_ns->ns.ops = &mntns_operations;
+-      new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
++      new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
+       atomic_set(&new_ns->count, 1);
+       new_ns->root = NULL;
+       INIT_LIST_HEAD(&new_ns->list);
+@@ -2759,6 +2778,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+       return new_ns;
+ }
++__latent_entropy
+ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+               struct user_namespace *user_ns, struct fs_struct *new_fs)
+ {
+@@ -2880,8 +2900,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+ }
+ EXPORT_SYMBOL(mount_subtree);
+-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+-              char __user *, type, unsigned long, flags, void __user *, data)
++SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
++              const char __user *, type, unsigned long, flags, void __user *, data)
+ {
+       int ret;
+       char *kernel_type;
+@@ -2987,6 +3007,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+       if (error)
+               goto out2;
++      if (gr_handle_chroot_pivot()) {
++              error = -EPERM;
++              goto out2;
++      }
++
+       get_fs_root(current->fs, &root);
+       old_mp = lock_mount(&old);
+       error = PTR_ERR(old_mp);
+@@ -3326,7 +3351,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+           !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+               return -EPERM;
+-      if (fs->users != 1)
++      if (atomic_read(&fs->users) != 1)
+               return -EINVAL;
+       get_mnt_ns(mnt_ns);
+diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
+index 5fe1cec..d0f4ac0 100644
+--- a/fs/nfs/callback.h
++++ b/fs/nfs/callback.h
+@@ -114,8 +114,8 @@ struct cb_sequenceres {
+       uint32_t                        csr_target_highestslotid;
+ };
+-extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+-                                     struct cb_sequenceres *res,
++extern __be32 nfs4_callback_sequence(void *_args,
++                                     void *_res,
+                                      struct cb_process_state *cps);
+ #define RCA4_TYPE_MASK_RDATA_DLG      0
+@@ -134,14 +134,14 @@ struct cb_recallanyargs {
+       uint32_t        craa_type_mask;
+ };
+-extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
++extern __be32 nfs4_callback_recallany(void *_args,
+                                       void *dummy,
+                                       struct cb_process_state *cps);
+ struct cb_recallslotargs {
+       uint32_t        crsa_target_highest_slotid;
+ };
+-extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
++extern __be32 nfs4_callback_recallslot(void *_args,
+                                        void *dummy,
+                                        struct cb_process_state *cps);
+@@ -160,7 +160,7 @@ struct cb_layoutrecallargs {
+ };
+ extern __be32 nfs4_callback_layoutrecall(
+-      struct cb_layoutrecallargs *args,
++      void *_args,
+       void *dummy, struct cb_process_state *cps);
+ struct cb_devicenotifyitem {
+@@ -176,15 +176,15 @@ struct cb_devicenotifyargs {
+ };
+ extern __be32 nfs4_callback_devicenotify(
+-      struct cb_devicenotifyargs *args,
++      void *_args,
+       void *dummy, struct cb_process_state *cps);
+ #endif /* CONFIG_NFS_V4_1 */
+ extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
+-extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+-                                  struct cb_getattrres *res,
++extern __be32 nfs4_callback_getattr(void *args,
++                                  void *res,
+                                   struct cb_process_state *cps);
+-extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
++extern __be32 nfs4_callback_recall(void *args, void *dummy,
+                                  struct cb_process_state *cps);
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index f953ef6..3791d58 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -19,10 +19,12 @@
+ #define NFSDBG_FACILITY NFSDBG_CALLBACK
+-__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+-                           struct cb_getattrres *res,
++__be32 nfs4_callback_getattr(void *_args,
++                           void *_res,
+                            struct cb_process_state *cps)
+ {
++      struct cb_getattrargs *args = _args;
++      struct cb_getattrres *res = _res;
+       struct nfs_delegation *delegation;
+       struct nfs_inode *nfsi;
+       struct inode *inode;
+@@ -68,9 +70,10 @@ out:
+       return res->status;
+ }
+-__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
++__be32 nfs4_callback_recall(void *_args, void *dummy,
+                           struct cb_process_state *cps)
+ {
++      struct cb_recallargs *args =  _args;
+       struct inode *inode;
+       __be32 res;
+       
+@@ -294,7 +297,7 @@ static u32 do_callback_layoutrecall(struct nfs_client *clp,
+ }
+-__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
++__be32 nfs4_callback_layoutrecall(void *args,
+                                 void *dummy, struct cb_process_state *cps)
+ {
+       u32 res;
+@@ -321,9 +324,10 @@ static void pnfs_recall_all_layouts(struct nfs_client *clp)
+       do_callback_layoutrecall(clp, &args);
+ }
+-__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
++__be32 nfs4_callback_devicenotify(void *_args,
+                                 void *dummy, struct cb_process_state *cps)
+ {
++      struct cb_devicenotifyargs *args = _args;
+       int i;
+       __be32 res = 0;
+       struct nfs_client *clp = cps->clp;
+@@ -465,10 +469,12 @@ out:
+       return status;
+ }
+-__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+-                            struct cb_sequenceres *res,
++__be32 nfs4_callback_sequence(void *_args,
++                            void *_res,
+                             struct cb_process_state *cps)
+ {
++      struct cb_sequenceargs *args = _args;
++      struct cb_sequenceres *res = _res;
+       struct nfs4_slot_table *tbl;
+       struct nfs4_slot *slot;
+       struct nfs_client *clp;
+@@ -569,9 +575,10 @@ validate_bitmap_values(unsigned long mask)
+       return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
+ }
+-__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
++__be32 nfs4_callback_recallany(void *_args, void *dummy,
+                              struct cb_process_state *cps)
+ {
++      struct cb_recallanyargs *args = _args;
+       __be32 status;
+       fmode_t flags = 0;
+@@ -604,9 +611,10 @@ out:
+ }
+ /* Reduce the fore channel's max_slots to the target value */
+-__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
++__be32 nfs4_callback_recallslot(void *_args, void *dummy,
+                               struct cb_process_state *cps)
+ {
++      struct cb_recallslotargs *args = _args;
+       struct nfs4_slot_table *fc_tbl;
+       __be32 status;
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 656f68f..79c0026 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -53,7 +53,7 @@ struct callback_op {
+       callback_decode_arg_t decode_args;
+       callback_encode_res_t encode_res;
+       long res_maxsize;
+-};
++} __do_const;
+ static struct callback_op callback_ops[];
+@@ -62,12 +62,12 @@ static __be32 nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp)
+       return htonl(NFS4_OK);
+ }
+-static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++static int nfs4_decode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_argsize_check(rqstp, p);
+ }
+-static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++static int nfs4_encode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_ressize_check(rqstp, p);
+ }
+@@ -199,8 +199,9 @@ static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
+       return 0;
+ }
+-static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args)
++static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, void *_args)
+ {
++      struct cb_getattrargs *args = _args;
+       __be32 status;
+       status = decode_fh(xdr, &args->fh);
+@@ -212,8 +213,9 @@ out:
+       return status;
+ }
+-static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args)
++static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, void *_args)
+ {
++      struct cb_recallargs *args = _args;
+       __be32 *p;
+       __be32 status;
+@@ -241,8 +243,9 @@ static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *statei
+ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
+                                      struct xdr_stream *xdr,
+-                                     struct cb_layoutrecallargs *args)
++                                     void *_args)
+ {
++      struct cb_layoutrecallargs *args = _args;
+       __be32 *p;
+       __be32 status = 0;
+       uint32_t iomode;
+@@ -301,8 +304,9 @@ out:
+ static
+ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
+                               struct xdr_stream *xdr,
+-                              struct cb_devicenotifyargs *args)
++                              void *_args)
+ {
++      struct cb_devicenotifyargs *args = _args;
+       __be32 *p;
+       __be32 status = 0;
+       u32 tmp;
+@@ -442,8 +446,9 @@ out:
+ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+                                       struct xdr_stream *xdr,
+-                                      struct cb_sequenceargs *args)
++                                      void *_args)
+ {
++      struct cb_sequenceargs *args = _args;
+       __be32 *p;
+       int i;
+       __be32 status;
+@@ -504,8 +509,9 @@ out_free:
+ static __be32 decode_recallany_args(struct svc_rqst *rqstp,
+                                     struct xdr_stream *xdr,
+-                                    struct cb_recallanyargs *args)
++                                    void *_args)
+ {
++      struct cb_recallanyargs *args = _args;
+       uint32_t bitmap[2];
+       __be32 *p, status;
+@@ -523,8 +529,9 @@ static __be32 decode_recallany_args(struct svc_rqst *rqstp,
+ static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
+                                       struct xdr_stream *xdr,
+-                                      struct cb_recallslotargs *args)
++                                      void *_args)
+ {
++      struct cb_recallslotargs *args = _args;
+       __be32 *p;
+       p = read_buf(xdr, 4);
+@@ -659,8 +666,9 @@ static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)
+       return 0;
+ }
+-static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res)
++static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, void *_res)
+ {
++      const struct cb_getattrres *res = _res;
+       __be32 *savep = NULL;
+       __be32 status = res->status;
+       
+@@ -702,8 +710,9 @@ static __be32 encode_sessionid(struct xdr_stream *xdr,
+ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
+                                      struct xdr_stream *xdr,
+-                                     const struct cb_sequenceres *res)
++                                     void *_res)
+ {
++      const struct cb_sequenceres *res = _res;
+       __be32 *p;
+       __be32 status = res->csr_status;
+@@ -967,43 +976,41 @@ static struct callback_op callback_ops[] = {
+               .res_maxsize = CB_OP_HDR_RES_MAXSZ,
+       },
+       [OP_CB_GETATTR] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_getattr,
+-              .decode_args = (callback_decode_arg_t)decode_getattr_args,
+-              .encode_res = (callback_encode_res_t)encode_getattr_res,
++              .process_op = nfs4_callback_getattr,
++              .decode_args = decode_getattr_args,
++              .encode_res = encode_getattr_res,
+               .res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
+       },
+       [OP_CB_RECALL] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_recall,
+-              .decode_args = (callback_decode_arg_t)decode_recall_args,
++              .process_op = nfs4_callback_recall,
++              .decode_args = decode_recall_args,
+               .res_maxsize = CB_OP_RECALL_RES_MAXSZ,
+       },
+ #if defined(CONFIG_NFS_V4_1)
+       [OP_CB_LAYOUTRECALL] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
+-              .decode_args =
+-                      (callback_decode_arg_t)decode_layoutrecall_args,
++              .process_op = nfs4_callback_layoutrecall,
++              .decode_args = decode_layoutrecall_args,
+               .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
+       },
+       [OP_CB_NOTIFY_DEVICEID] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_devicenotify,
+-              .decode_args =
+-                      (callback_decode_arg_t)decode_devicenotify_args,
++              .process_op = nfs4_callback_devicenotify,
++              .decode_args = decode_devicenotify_args,
+               .res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ,
+       },
+       [OP_CB_SEQUENCE] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_sequence,
+-              .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
+-              .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
++              .process_op = nfs4_callback_sequence,
++              .decode_args = decode_cb_sequence_args,
++              .encode_res = encode_cb_sequence_res,
+               .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
+       },
+       [OP_CB_RECALL_ANY] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_recallany,
+-              .decode_args = (callback_decode_arg_t)decode_recallany_args,
++              .process_op = nfs4_callback_recallany,
++              .decode_args = decode_recallany_args,
+               .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
+       },
+       [OP_CB_RECALL_SLOT] = {
+-              .process_op = (callback_process_op_t)nfs4_callback_recallslot,
+-              .decode_args = (callback_decode_arg_t)decode_recallslot_args,
++              .process_op = nfs4_callback_recallslot,
++              .decode_args = decode_recallslot_args,
+               .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
+       },
+ #endif /* CONFIG_NFS_V4_1 */
+@@ -1015,13 +1022,13 @@ static struct callback_op callback_ops[] = {
+ static struct svc_procedure nfs4_callback_procedures1[] = {
+       [CB_NULL] = {
+               .pc_func = nfs4_callback_null,
+-              .pc_decode = (kxdrproc_t)nfs4_decode_void,
+-              .pc_encode = (kxdrproc_t)nfs4_encode_void,
++              .pc_decode = nfs4_decode_void,
++              .pc_encode = nfs4_encode_void,
+               .pc_xdrressize = 1,
+       },
+       [CB_COMPOUND] = {
+               .pc_func = nfs4_callback_compound,
+-              .pc_encode = (kxdrproc_t)nfs4_encode_void,
++              .pc_encode = nfs4_encode_void,
+               .pc_argsize = 256,
+               .pc_ressize = 256,
+               .pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 6bc5a68..a7324a1 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -705,8 +705,9 @@ out:
+  * We only need to convert from xdr once so future lookups are much simpler
+  */
+ static
+-int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
++int nfs_readdir_filler(struct file *_desc, struct page* page)
+ {
++      nfs_readdir_descriptor_t *desc = (nfs_readdir_descriptor_t *)_desc;
+       struct inode    *inode = file_inode(desc->file);
+       int ret;
+@@ -741,7 +742,7 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
+       for (;;) {
+               page = read_cache_page(desc->file->f_mapping,
+-                      desc->page_index, (filler_t *)nfs_readdir_filler, desc);
++                      desc->page_index, nfs_readdir_filler, desc);
+               if (IS_ERR(page) || grab_page(page))
+                       break;
+               put_page(page);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index bf4ec5e..39aec95 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1323,16 +1323,16 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
+       return 0;
+ }
+-static atomic_long_t nfs_attr_generation_counter;
++static atomic_long_unchecked_t nfs_attr_generation_counter;
+ static unsigned long nfs_read_attr_generation_counter(void)
+ {
+-      return atomic_long_read(&nfs_attr_generation_counter);
++      return atomic_long_read_unchecked(&nfs_attr_generation_counter);
+ }
+ unsigned long nfs_inc_attr_generation_counter(void)
+ {
+-      return atomic_long_inc_return(&nfs_attr_generation_counter);
++      return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
+ }
+ EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 74935a1..15544e5 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -652,9 +652,10 @@ unsigned long nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
+ static inline
+ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
+ {
+-      sb->s_maxbytes = (loff_t)maxfilesize;
+-      if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0)
++      if (maxfilesize > MAX_LFS_FILESIZE || maxfilesize == 0)
+               sb->s_maxbytes = MAX_LFS_FILESIZE;
++      else
++              sb->s_maxbytes = (loff_t)maxfilesize;
+ }
+ /*
+diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
+index 09b1900..344f4c2 100644
+--- a/fs/nfs/mount_clnt.c
++++ b/fs/nfs/mount_clnt.c
+@@ -303,8 +303,8 @@ static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+       xdr_encode_opaque(p, pathname, pathname_len);
+ }
+-static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const char *dirpath)
++static void mnt_xdr_enc_dirpath(void *req, struct xdr_stream *xdr,
++                              void *dirpath)
+ {
+       encode_mntdirpath(xdr, dirpath);
+ }
+@@ -355,10 +355,11 @@ static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
+       return 0;
+ }
+-static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
++static int mnt_xdr_dec_mountres(void *req,
+                               struct xdr_stream *xdr,
+-                              struct mountres *res)
++                              void *_res)
+ {
++      struct mountres *res = _res;
+       int status;
+       status = decode_status(xdr, res);
+@@ -447,10 +448,11 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
+       return 0;
+ }
+-static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
++static int mnt_xdr_dec_mountres3(void *req,
+                                struct xdr_stream *xdr,
+-                               struct mountres *res)
++                               void *_res)
+ {
++      struct mountres *res = _res;
+       int status;
+       status = decode_fhs_status(xdr, res);
+@@ -467,8 +469,8 @@ static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
+ static struct rpc_procinfo mnt_procedures[] = {
+       [MOUNTPROC_MNT] = {
+               .p_proc         = MOUNTPROC_MNT,
+-              .p_encode       = (kxdreproc_t)mnt_xdr_enc_dirpath,
+-              .p_decode       = (kxdrdproc_t)mnt_xdr_dec_mountres,
++              .p_encode       = mnt_xdr_enc_dirpath,
++              .p_decode       = mnt_xdr_dec_mountres,
+               .p_arglen       = MNT_enc_dirpath_sz,
+               .p_replen       = MNT_dec_mountres_sz,
+               .p_statidx      = MOUNTPROC_MNT,
+@@ -476,7 +478,7 @@ static struct rpc_procinfo mnt_procedures[] = {
+       },
+       [MOUNTPROC_UMNT] = {
+               .p_proc         = MOUNTPROC_UMNT,
+-              .p_encode       = (kxdreproc_t)mnt_xdr_enc_dirpath,
++              .p_encode       = mnt_xdr_enc_dirpath,
+               .p_arglen       = MNT_enc_dirpath_sz,
+               .p_statidx      = MOUNTPROC_UMNT,
+               .p_name         = "UMOUNT",
+@@ -486,8 +488,8 @@ static struct rpc_procinfo mnt_procedures[] = {
+ static struct rpc_procinfo mnt3_procedures[] = {
+       [MOUNTPROC3_MNT] = {
+               .p_proc         = MOUNTPROC3_MNT,
+-              .p_encode       = (kxdreproc_t)mnt_xdr_enc_dirpath,
+-              .p_decode       = (kxdrdproc_t)mnt_xdr_dec_mountres3,
++              .p_encode       = mnt_xdr_enc_dirpath,
++              .p_decode       = mnt_xdr_dec_mountres3,
+               .p_arglen       = MNT_enc_dirpath_sz,
+               .p_replen       = MNT_dec_mountres3_sz,
+               .p_statidx      = MOUNTPROC3_MNT,
+@@ -495,7 +497,7 @@ static struct rpc_procinfo mnt3_procedures[] = {
+       },
+       [MOUNTPROC3_UMNT] = {
+               .p_proc         = MOUNTPROC3_UMNT,
+-              .p_encode       = (kxdreproc_t)mnt_xdr_enc_dirpath,
++              .p_encode       = mnt_xdr_enc_dirpath,
+               .p_arglen       = MNT_enc_dirpath_sz,
+               .p_statidx      = MOUNTPROC3_UMNT,
+               .p_name         = "UMOUNT",
+diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
+index b4e03ed..6907eb4 100644
+--- a/fs/nfs/nfs2xdr.c
++++ b/fs/nfs/nfs2xdr.c
+@@ -566,9 +566,9 @@ out_default:
+  * "NFS: Network File System Protocol Specification".
+  */
+-static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
++static void nfs2_xdr_enc_fhandle(void *req,
+                                struct xdr_stream *xdr,
+-                               const struct nfs_fh *fh)
++                               void *fh)
+ {
+       encode_fhandle(xdr, fh);
+ }
+@@ -581,25 +581,31 @@ static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
+  *            sattr attributes;
+  *    };
+  */
+-static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_sattrargs(void *req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nfs_sattrargs *args)
++                                 void *_args)
+ {
++      const struct nfs_sattrargs *args = _args;
++
+       encode_fhandle(xdr, args->fh);
+       encode_sattr(xdr, args->sattr);
+ }
+-static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_diropargs(void *req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nfs_diropargs *args)
++                                 void *_args)
+ {
++      const struct nfs_diropargs *args = _args;
++
+       encode_diropargs(xdr, args->fh, args->name, args->len);
+ }
+-static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_readlinkargs(void *req,
+                                     struct xdr_stream *xdr,
+-                                    const struct nfs_readlinkargs *args)
++                                    void *_args)
+ {
++      const struct nfs_readlinkargs *args = _args;
++
+       encode_fhandle(xdr, args->fh);
+       prepare_reply_buffer(req, args->pages, args->pgbase,
+                                       args->pglen, NFS_readlinkres_sz);
+@@ -630,10 +636,13 @@ static void encode_readargs(struct xdr_stream *xdr,
+       *p = cpu_to_be32(count);
+ }
+-static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_readargs(void *_req,
+                                 struct xdr_stream *xdr,
+-                                const struct nfs_pgio_args *args)
++                                void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      const struct nfs_pgio_args *args = _args;
++
+       encode_readargs(xdr, args);
+       prepare_reply_buffer(req, args->pages, args->pgbase,
+                                       args->count, NFS_readres_sz);
+@@ -670,9 +679,9 @@ static void encode_writeargs(struct xdr_stream *xdr,
+       xdr_write_pages(xdr, args->pages, args->pgbase, count);
+ }
+-static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_writeargs(void *req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nfs_pgio_args *args)
++                                 void *args)
+ {
+       encode_writeargs(xdr, args);
+       xdr->buf->flags |= XDRBUF_WRITE;
+@@ -686,18 +695,22 @@ static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
+  *            sattr attributes;
+  *    };
+  */
+-static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_createargs(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nfs_createargs *args)
++                                  void *_args)
+ {
++      const struct nfs_createargs *args = _args;
++
+       encode_diropargs(xdr, args->fh, args->name, args->len);
+       encode_sattr(xdr, args->sattr);
+ }
+-static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_removeargs(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nfs_removeargs *args)
++                                  void *_args)
+ {
++      const struct nfs_removeargs *args = _args;
++
+       encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
+ }
+@@ -709,10 +722,11 @@ static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
+  *            diropargs to;
+  *    };
+  */
+-static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_renameargs(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nfs_renameargs *args)
++                                  void *_args)
+ {
++      const struct nfs_renameargs *args = _args;
+       const struct qstr *old = args->old_name;
+       const struct qstr *new = args->new_name;
+@@ -728,10 +742,12 @@ static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
+  *            diropargs to;
+  *    };
+  */
+-static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_linkargs(void *req,
+                                 struct xdr_stream *xdr,
+-                                const struct nfs_linkargs *args)
++                                void *_args)
+ {
++      const struct nfs_linkargs *args = _args;
++
+       encode_fhandle(xdr, args->fromfh);
+       encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
+ }
+@@ -745,10 +761,12 @@ static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
+  *            sattr attributes;
+  *    };
+  */
+-static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_symlinkargs(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs_symlinkargs *args)
++                                   void *_args)
+ {
++      const struct nfs_symlinkargs *args = _args;
++
+       encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
+       encode_path(xdr, args->pages, args->pathlen);
+       encode_sattr(xdr, args->sattr);
+@@ -775,10 +793,12 @@ static void encode_readdirargs(struct xdr_stream *xdr,
+       *p = cpu_to_be32(args->count);
+ }
+-static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
++static void nfs2_xdr_enc_readdirargs(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs_readdirargs *args)
++                                   void *_args)
+ {
++      const struct nfs_readdirargs *args = _args;
++
+       encode_readdirargs(xdr, args);
+       prepare_reply_buffer(req, args->pages, 0,
+                                       args->count, NFS_readdirres_sz);
+@@ -791,7 +811,7 @@ static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
+  * "NFS: Network File System Protocol Specification".
+  */
+-static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
++static int nfs2_xdr_dec_stat(void *req, struct xdr_stream *xdr,
+                            void *__unused)
+ {
+       enum nfs_stat status;
+@@ -808,14 +828,14 @@ out_default:
+       return nfs_stat_to_errno(status);
+ }
+-static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs_fattr *result)
++static int nfs2_xdr_dec_attrstat(void *req, struct xdr_stream *xdr,
++                               void *result)
+ {
+       return decode_attrstat(xdr, result, NULL);
+ }
+-static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs_diropok *result)
++static int nfs2_xdr_dec_diropres(void *req, struct xdr_stream *xdr,
++                               void *result)
+ {
+       return decode_diropres(xdr, result);
+ }
+@@ -830,7 +850,7 @@ static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+  *            void;
+  *    };
+  */
+-static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
++static int nfs2_xdr_dec_readlinkres(void *req,
+                                   struct xdr_stream *xdr, void *__unused)
+ {
+       enum nfs_stat status;
+@@ -859,9 +879,10 @@ out_default:
+  *            void;
+  *    };
+  */
+-static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              struct nfs_pgio_res *result)
++static int nfs2_xdr_dec_readres(void *req, struct xdr_stream *xdr,
++                              void *_result)
+ {
++      struct nfs_pgio_res *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -881,9 +902,11 @@ out_default:
+       return nfs_stat_to_errno(status);
+ }
+-static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs_pgio_res *result)
++static int nfs2_xdr_dec_writeres(void *req, struct xdr_stream *xdr,
++                               void *_result)
+ {
++      struct nfs_pgio_res *result = _result;
++
+       /* All NFSv2 writes are "file sync" writes */
+       result->verf->committed = NFS_FILE_SYNC;
+       return decode_attrstat(xdr, result->fattr, &result->op_status);
+@@ -981,7 +1004,7 @@ static int decode_readdirok(struct xdr_stream *xdr)
+       return xdr_read_pages(xdr, xdr->buf->page_len);
+ }
+-static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
++static int nfs2_xdr_dec_readdirres(void *req,
+                                  struct xdr_stream *xdr, void *__unused)
+ {
+       enum nfs_stat status;
+@@ -1033,8 +1056,8 @@ out_overflow:
+       return -EIO;
+ }
+-static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                                struct nfs2_fsstat *result)
++static int nfs2_xdr_dec_statfsres(void *req, struct xdr_stream *xdr,
++                                void *result)
+ {
+       enum nfs_stat status;
+       int error;
+@@ -1118,8 +1141,8 @@ static int nfs_stat_to_errno(enum nfs_stat status)
+ #define PROC(proc, argtype, restype, timer)                           \
+ [NFSPROC_##proc] = {                                                  \
+       .p_proc     =  NFSPROC_##proc,                                  \
+-      .p_encode   =  (kxdreproc_t)nfs2_xdr_enc_##argtype,             \
+-      .p_decode   =  (kxdrdproc_t)nfs2_xdr_dec_##restype,             \
++      .p_encode   =  nfs2_xdr_enc_##argtype,                          \
++      .p_decode   =  nfs2_xdr_dec_##restype,                          \
+       .p_arglen   =  NFS_##argtype##_sz,                              \
+       .p_replen   =  NFS_##restype##_sz,                              \
+       .p_timer    =  timer,                                           \
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index 267126d..19c97b8 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -844,9 +844,9 @@ static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
+  *            nfs_fh3  object;
+  *    };
+  */
+-static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_getattr3args(void *req,
+                                     struct xdr_stream *xdr,
+-                                    const struct nfs_fh *fh)
++                                    void *fh)
+ {
+       encode_nfs_fh3(xdr, fh);
+ }
+@@ -882,10 +882,12 @@ static void encode_sattrguard3(struct xdr_stream *xdr,
+       }
+ }
+-static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_setattr3args(void *req,
+                                     struct xdr_stream *xdr,
+-                                    const struct nfs3_sattrargs *args)
++                                    void *_args)
+ {
++      const struct nfs3_sattrargs *args = _args;
++
+       encode_nfs_fh3(xdr, args->fh);
+       encode_sattr3(xdr, args->sattr);
+       encode_sattrguard3(xdr, args);
+@@ -898,10 +900,12 @@ static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
+  *            diropargs3  what;
+  *    };
+  */
+-static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_lookup3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs3_diropargs *args)
++                                   void *_args)
+ {
++      const struct nfs3_diropargs *args = _args;
++
+       encode_diropargs3(xdr, args->fh, args->name, args->len);
+ }
+@@ -920,9 +924,9 @@ static void encode_access3args(struct xdr_stream *xdr,
+       encode_uint32(xdr, args->access);
+ }
+-static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_access3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs3_accessargs *args)
++                                   void *args)
+ {
+       encode_access3args(xdr, args);
+ }
+@@ -934,10 +938,11 @@ static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
+  *            nfs_fh3 symlink;
+  *    };
+  */
+-static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_readlink3args(void *req,
+                                      struct xdr_stream *xdr,
+-                                     const struct nfs3_readlinkargs *args)
++                                     void *_args)
+ {
++      const struct nfs3_readlinkargs *args = _args;
+       encode_nfs_fh3(xdr, args->fh);
+       prepare_reply_buffer(req, args->pages, args->pgbase,
+                                       args->pglen, NFS3_readlinkres_sz);
+@@ -964,10 +969,12 @@ static void encode_read3args(struct xdr_stream *xdr,
+       *p = cpu_to_be32(args->count);
+ }
+-static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_read3args(void *_req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nfs_pgio_args *args)
++                                 void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      const struct nfs_pgio_args *args = _args;
+       encode_read3args(xdr, args);
+       prepare_reply_buffer(req, args->pages, args->pgbase,
+                                       args->count, NFS3_readres_sz);
+@@ -1006,9 +1013,9 @@ static void encode_write3args(struct xdr_stream *xdr,
+       xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+ }
+-static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_write3args(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nfs_pgio_args *args)
++                                  void *args)
+ {
+       encode_write3args(xdr, args);
+       xdr->buf->flags |= XDRBUF_WRITE;
+@@ -1053,10 +1060,12 @@ static void encode_createhow3(struct xdr_stream *xdr,
+       }
+ }
+-static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_create3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs3_createargs *args)
++                                   void *_args)
+ {
++      const struct nfs3_createargs *args = _args;
++
+       encode_diropargs3(xdr, args->fh, args->name, args->len);
+       encode_createhow3(xdr, args);
+ }
+@@ -1069,10 +1078,12 @@ static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
+  *            sattr3          attributes;
+  *    };
+  */
+-static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_mkdir3args(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nfs3_mkdirargs *args)
++                                  void *_args)
+ {
++      const struct nfs3_mkdirargs *args = _args;
++
+       encode_diropargs3(xdr, args->fh, args->name, args->len);
+       encode_sattr3(xdr, args->sattr);
+ }
+@@ -1097,10 +1108,12 @@ static void encode_symlinkdata3(struct xdr_stream *xdr,
+       encode_nfspath3(xdr, args->pages, args->pathlen);
+ }
+-static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_symlink3args(void *req,
+                                     struct xdr_stream *xdr,
+-                                    const struct nfs3_symlinkargs *args)
++                                    void *_args)
+ {
++      const struct nfs3_symlinkargs *args = _args;
++
+       encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+       encode_symlinkdata3(xdr, args);
+       xdr->buf->flags |= XDRBUF_WRITE;
+@@ -1158,10 +1171,12 @@ static void encode_mknoddata3(struct xdr_stream *xdr,
+       }
+ }
+-static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_mknod3args(void *req,
+                                   struct xdr_stream *xdr,
+-                                  const struct nfs3_mknodargs *args)
++                                  void *_args)
+ {
++      const struct nfs3_mknodargs *args = _args;
++
+       encode_diropargs3(xdr, args->fh, args->name, args->len);
+       encode_mknoddata3(xdr, args);
+ }
+@@ -1173,10 +1188,12 @@ static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
+  *            diropargs3  object;
+  *    };
+  */
+-static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_remove3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs_removeargs *args)
++                                   void *_args)
+ {
++      const struct nfs_removeargs *args = _args;
++
+       encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
+ }
+@@ -1188,10 +1205,11 @@ static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
+  *            diropargs3      to;
+  *    };
+  */
+-static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_rename3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs_renameargs *args)
++                                   void *_args)
+ {
++      const struct nfs_renameargs *args = _args;
+       const struct qstr *old = args->old_name;
+       const struct qstr *new = args->new_name;
+@@ -1207,10 +1225,12 @@ static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
+  *            diropargs3      link;
+  *    };
+  */
+-static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_link3args(void *req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nfs3_linkargs *args)
++                                 void *_args)
+ {
++      const struct nfs3_linkargs *args = _args;
++
+       encode_nfs_fh3(xdr, args->fromfh);
+       encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
+ }
+@@ -1238,10 +1258,12 @@ static void encode_readdir3args(struct xdr_stream *xdr,
+       *p = cpu_to_be32(args->count);
+ }
+-static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_readdir3args(void *req,
+                                     struct xdr_stream *xdr,
+-                                    const struct nfs3_readdirargs *args)
++                                    void *_args)
+ {
++      const struct nfs3_readdirargs *args = _args;
++
+       encode_readdir3args(xdr, args);
+       prepare_reply_buffer(req, args->pages, 0,
+                               args->count, NFS3_readdirres_sz);
+@@ -1278,10 +1300,12 @@ static void encode_readdirplus3args(struct xdr_stream *xdr,
+       *p = cpu_to_be32(args->count);
+ }
+-static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_readdirplus3args(void *req,
+                                         struct xdr_stream *xdr,
+-                                        const struct nfs3_readdirargs *args)
++                                        void *_args)
+ {
++      const struct nfs3_readdirargs *args = _args;
++
+       encode_readdirplus3args(xdr, args);
+       prepare_reply_buffer(req, args->pages, 0,
+                               args->count, NFS3_readdirres_sz);
+@@ -1308,19 +1332,21 @@ static void encode_commit3args(struct xdr_stream *xdr,
+       *p = cpu_to_be32(args->count);
+ }
+-static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_commit3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs_commitargs *args)
++                                   void *args)
+ {
+       encode_commit3args(xdr, args);
+ }
+ #ifdef CONFIG_NFS_V3_ACL
+-static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_getacl3args(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs3_getaclargs *args)
++                                   void *_args)
+ {
++      const struct nfs3_getaclargs *args = _args;
++
+       encode_nfs_fh3(xdr, args->fh);
+       encode_uint32(xdr, args->mask);
+       if (args->mask & (NFS_ACL | NFS_DFACL))
+@@ -1329,10 +1355,12 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
+                                       ACL3_getaclres_sz);
+ }
+-static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
++static void nfs3_xdr_enc_setacl3args(void *_req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs3_setaclargs *args)
++                                   void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      const struct nfs3_setaclargs *args = _args;
+       unsigned int base;
+       int error;
+@@ -1380,9 +1408,9 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+  *            void;
+  *    };
+  */
+-static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_getattr3res(void *req,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs_fattr *result)
++                                  void *result)
+ {
+       enum nfs_stat status;
+       int error;
+@@ -1417,9 +1445,9 @@ out_default:
+  *            SETATTR3resfail resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_setattr3res(void *req,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs_fattr *result)
++                                  void *result)
+ {
+       enum nfs_stat status;
+       int error;
+@@ -1458,10 +1486,11 @@ out_status:
+  *            LOOKUP3resfail  resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_lookup3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs3_diropres *result)
++                                 void *_result)
+ {
++      struct nfs3_diropres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1505,10 +1534,11 @@ out_default:
+  *            ACCESS3resfail  resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_access3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs3_accessres *result)
++                                 void *_result)
+ {
++      struct nfs3_accessres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1546,9 +1576,9 @@ out_default:
+  *            READLINK3resfail resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_readlink3res(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs_fattr *result)
++                                   void *result)
+ {
+       enum nfs_stat status;
+       int error;
+@@ -1625,9 +1655,10 @@ out_overflow:
+       return -EIO;
+ }
+-static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs_pgio_res *result)
++static int nfs3_xdr_dec_read3res(void *req, struct xdr_stream *xdr,
++                               void *_result)
+ {
++      struct nfs_pgio_res *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1698,9 +1729,10 @@ out_eio:
+       return -EIO;
+ }
+-static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                                struct nfs_pgio_res *result)
++static int nfs3_xdr_dec_write3res(void *req, struct xdr_stream *xdr,
++                                void *_result)
+ {
++      struct nfs_pgio_res *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1762,10 +1794,11 @@ out:
+       return error;
+ }
+-static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_create3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs3_diropres *result)
++                                 void *_result)
+ {
++      struct nfs3_diropres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1802,10 +1835,11 @@ out_default:
+  *            REMOVE3resfail resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_remove3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs_removeres *result)
++                                 void *_result)
+ {
++      struct nfs_removeres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1843,10 +1877,11 @@ out_status:
+  *            RENAME3resfail resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_rename3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs_renameres *result)
++                                 void *_result)
+ {
++      struct nfs_renameres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -1887,9 +1922,10 @@ out_status:
+  *            LINK3resfail    resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs3_linkres *result)
++static int nfs3_xdr_dec_link3res(void *req, struct xdr_stream *xdr,
++                               void *_result)
+ {
++      struct nfs3_linkres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -2070,10 +2106,11 @@ out:
+       return error;
+ }
+-static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_readdir3res(void *req,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs3_readdirres *result)
++                                  void *_result)
+ {
++      struct nfs3_readdirres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -2138,10 +2175,11 @@ out_overflow:
+       return -EIO;
+ }
+-static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_fsstat3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs_fsstat *result)
++                                 void *_result)
+ {
++      struct nfs_fsstat *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -2214,10 +2252,11 @@ out_overflow:
+       return -EIO;
+ }
+-static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_fsinfo3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs_fsinfo *result)
++                                 void *_result)
+ {
++      struct nfs_fsinfo *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -2277,10 +2316,11 @@ out_overflow:
+       return -EIO;
+ }
+-static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_pathconf3res(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs_pathconf *result)
++                                   void *_result)
+ {
++      struct nfs_pathconf *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -2318,10 +2358,11 @@ out_status:
+  *            COMMIT3resfail  resfail;
+  *    };
+  */
+-static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_commit3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs_commitres *result)
++                                 void *_result)
+ {
++      struct nfs_commitres *result = _result;
+       enum nfs_stat status;
+       int error;
+@@ -2387,9 +2428,9 @@ out:
+       return error;
+ }
+-static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_getacl3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs3_getaclres *result)
++                                 void *result)
+ {
+       enum nfs_stat status;
+       int error;
+@@ -2406,9 +2447,9 @@ out_default:
+       return nfs3_stat_to_errno(status);
+ }
+-static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
++static int nfs3_xdr_dec_setacl3res(void *req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs_fattr *result)
++                                 void *result)
+ {
+       enum nfs_stat status;
+       int error;
+@@ -2495,8 +2536,8 @@ static int nfs3_stat_to_errno(enum nfs_stat status)
+ #define PROC(proc, argtype, restype, timer)                           \
+ [NFS3PROC_##proc] = {                                                 \
+       .p_proc      = NFS3PROC_##proc,                                 \
+-      .p_encode    = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args,      \
+-      .p_decode    = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res,       \
++      .p_encode    = nfs3_xdr_enc_##argtype##3args,                   \
++      .p_decode    = nfs3_xdr_dec_##restype##3res,                    \
+       .p_arglen    = NFS3_##argtype##args_sz,                         \
+       .p_replen    = NFS3_##restype##res_sz,                          \
+       .p_timer     = timer,                                           \
+@@ -2538,8 +2579,8 @@ const struct rpc_version nfs_version3 = {
+ static struct rpc_procinfo    nfs3_acl_procedures[] = {
+       [ACLPROC3_GETACL] = {
+               .p_proc = ACLPROC3_GETACL,
+-              .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
+-              .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
++              .p_encode = nfs3_xdr_enc_getacl3args,
++              .p_decode = nfs3_xdr_dec_getacl3res,
+               .p_arglen = ACL3_getaclargs_sz,
+               .p_replen = ACL3_getaclres_sz,
+               .p_timer = 1,
+@@ -2547,8 +2588,8 @@ static struct rpc_procinfo       nfs3_acl_procedures[] = {
+       },
+       [ACLPROC3_SETACL] = {
+               .p_proc = ACLPROC3_SETACL,
+-              .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
+-              .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
++              .p_encode = nfs3_xdr_enc_setacl3args,
++              .p_decode = nfs3_xdr_dec_setacl3res,
+               .p_arglen = ACL3_setaclargs_sz,
+               .p_replen = ACL3_setaclres_sz,
+               .p_timer = 0,
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index 8b26058..b31170f 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -205,10 +205,12 @@ static void encode_clone(struct xdr_stream *xdr,
+ /*
+  * Encode ALLOCATE request
+  */
+-static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
++static void nfs4_xdr_enc_allocate(void *_req,
+                                 struct xdr_stream *xdr,
+-                                struct nfs42_falloc_args *args)
++                                void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs42_falloc_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -224,10 +226,12 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
+ /*
+  * Encode COPY request
+  */
+-static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
++static void nfs4_xdr_enc_copy(void *_req,
+                             struct xdr_stream *xdr,
+-                            struct nfs42_copy_args *args)
++                            void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs42_copy_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -244,10 +248,12 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
+ /*
+  * Encode DEALLOCATE request
+  */
+-static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
++static void nfs4_xdr_enc_deallocate(void *_req,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs42_falloc_args *args)
++                                  void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs42_falloc_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -263,10 +269,12 @@ static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
+ /*
+  * Encode SEEK request
+  */
+-static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
++static void nfs4_xdr_enc_seek(void *_req,
+                             struct xdr_stream *xdr,
+-                            struct nfs42_seek_args *args)
++                            void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs42_seek_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -281,10 +289,12 @@ static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
+ /*
+  * Encode LAYOUTSTATS request
+  */
+-static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
++static void nfs4_xdr_enc_layoutstats(void *_req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs42_layoutstat_args *args)
++                                   void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs42_layoutstat_args *args = _args;
+       int i;
+       struct compound_hdr hdr = {
+@@ -303,10 +313,12 @@ static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
+ /*
+  * Encode CLONE request
+  */
+-static void nfs4_xdr_enc_clone(struct rpc_rqst *req,
++static void nfs4_xdr_enc_clone(void *_req,
+                              struct xdr_stream *xdr,
+-                             struct nfs42_clone_args *args)
++                             void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs42_clone_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -430,10 +442,12 @@ static int decode_clone(struct xdr_stream *xdr)
+ /*
+  * Decode ALLOCATE request
+  */
+-static int nfs4_xdr_dec_allocate(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_allocate(void *_rqstp,
+                                struct xdr_stream *xdr,
+-                               struct nfs42_falloc_res *res)
++                               void *_res)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfs42_falloc_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -457,10 +471,12 @@ out:
+ /*
+  * Decode COPY response
+  */
+-static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_copy(void *_rqstp,
+                            struct xdr_stream *xdr,
+-                           struct nfs42_copy_res *res)
++                           void *_res)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfs42_copy_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -487,10 +503,12 @@ out:
+ /*
+  * Decode DEALLOCATE request
+  */
+-static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_deallocate(void *_rqstp,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs42_falloc_res *res)
++                                 void *_res)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfs42_falloc_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -514,10 +532,12 @@ out:
+ /*
+  * Decode SEEK request
+  */
+-static int nfs4_xdr_dec_seek(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_seek(void *_rqstp,
+                            struct xdr_stream *xdr,
+-                           struct nfs42_seek_res *res)
++                           void *_res)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfs42_seek_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -538,10 +558,12 @@ out:
+ /*
+  * Decode LAYOUTSTATS request
+  */
+-static int nfs4_xdr_dec_layoutstats(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_layoutstats(void *_rqstp,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs42_layoutstat_res *res)
++                                  void *_res)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfs42_layoutstat_res *res = _res;
+       struct compound_hdr hdr;
+       int status, i;
+@@ -568,10 +590,12 @@ out:
+ /*
+  * Decode CLONE request
+  */
+-static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_clone(void *_rqstp,
+                             struct xdr_stream *xdr,
+-                            struct nfs42_clone_res *res)
++                            void *_res)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfs42_clone_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 7bd3a5c0..0c408e8 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2081,9 +2081,10 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
+ /*
+  * Encode an ACCESS request
+  */
+-static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const struct nfs4_accessargs *args)
++static void nfs4_xdr_enc_access(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      const struct nfs4_accessargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2099,9 +2100,10 @@ static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode LOOKUP request
+  */
+-static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const struct nfs4_lookup_arg *args)
++static void nfs4_xdr_enc_lookup(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      const struct nfs4_lookup_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2118,10 +2120,11 @@ static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode LOOKUP_ROOT request
+  */
+-static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
++static void nfs4_xdr_enc_lookup_root(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs4_lookup_root_arg *args)
++                                   void *_args)
+ {
++      const struct nfs4_lookup_root_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2137,9 +2140,10 @@ static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
+ /*
+  * Encode REMOVE request
+  */
+-static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const struct nfs_removeargs *args)
++static void nfs4_xdr_enc_remove(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      const struct nfs_removeargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2154,9 +2158,10 @@ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode RENAME request
+  */
+-static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const struct nfs_renameargs *args)
++static void nfs4_xdr_enc_rename(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      const struct nfs_renameargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2173,9 +2178,10 @@ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode LINK request
+  */
+-static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                           const struct nfs4_link_arg *args)
++static void nfs4_xdr_enc_link(void *req, struct xdr_stream *xdr,
++                           void *_args)
+ {
++      const struct nfs4_link_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2194,9 +2200,10 @@ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode CREATE request
+  */
+-static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const struct nfs4_create_arg *args)
++static void nfs4_xdr_enc_create(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      const struct nfs4_create_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2213,8 +2220,8 @@ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode SYMLINK request
+  */
+-static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               const struct nfs4_create_arg *args)
++static void nfs4_xdr_enc_symlink(void *req, struct xdr_stream *xdr,
++                               void *args)
+ {
+       nfs4_xdr_enc_create(req, xdr, args);
+ }
+@@ -2222,9 +2229,10 @@ static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode GETATTR request
+  */
+-static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               const struct nfs4_getattr_arg *args)
++static void nfs4_xdr_enc_getattr(void *req, struct xdr_stream *xdr,
++                               void *_args)
+ {
++      const struct nfs4_getattr_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2239,9 +2247,10 @@ static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a CLOSE request
+  */
+-static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs_closeargs *args)
++static void nfs4_xdr_enc_close(void *req, struct xdr_stream *xdr,
++                             void *_args)
+ {
++      struct nfs_closeargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2257,9 +2266,10 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode an OPEN request
+  */
+-static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                            struct nfs_openargs *args)
++static void nfs4_xdr_enc_open(void *req, struct xdr_stream *xdr,
++                            void *_args)
+ {
++      struct nfs_openargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2278,10 +2288,11 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode an OPEN_CONFIRM request
+  */
+-static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
++static void nfs4_xdr_enc_open_confirm(void *req,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs_open_confirmargs *args)
++                                    void *_args)
+ {
++      struct nfs_open_confirmargs *args = _args;
+       struct compound_hdr hdr = {
+               .nops   = 0,
+       };
+@@ -2295,10 +2306,11 @@ static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
+ /*
+  * Encode an OPEN request with no attributes.
+  */
+-static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
++static void nfs4_xdr_enc_open_noattr(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs_openargs *args)
++                                   void *_args)
+ {
++      struct nfs_openargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2316,10 +2328,11 @@ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
+ /*
+  * Encode an OPEN_DOWNGRADE request
+  */
+-static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
++static void nfs4_xdr_enc_open_downgrade(void *req,
+                                       struct xdr_stream *xdr,
+-                                      struct nfs_closeargs *args)
++                                      void *_args)
+ {
++      struct nfs_closeargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2335,9 +2348,10 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
+ /*
+  * Encode a LOCK request
+  */
+-static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                            struct nfs_lock_args *args)
++static void nfs4_xdr_enc_lock(void *req, struct xdr_stream *xdr,
++                            void *_args)
+ {
++      struct nfs_lock_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2352,9 +2366,10 @@ static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a LOCKT request
+  */
+-static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs_lockt_args *args)
++static void nfs4_xdr_enc_lockt(void *req, struct xdr_stream *xdr,
++                             void *_args)
+ {
++      struct nfs_lockt_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2369,9 +2384,10 @@ static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a LOCKU request
+  */
+-static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs_locku_args *args)
++static void nfs4_xdr_enc_locku(void *req, struct xdr_stream *xdr,
++                             void *_args)
+ {
++      struct nfs_locku_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2383,10 +2399,11 @@ static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+       encode_nops(&hdr);
+ }
+-static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
++static void nfs4_xdr_enc_release_lockowner(void *req,
+                                          struct xdr_stream *xdr,
+-                                      struct nfs_release_lockowner_args *args)
++                                      void *_args)
+ {
++      struct nfs_release_lockowner_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = 0,
+       };
+@@ -2399,9 +2416,11 @@ static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
+ /*
+  * Encode a READLINK request
+  */
+-static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                                const struct nfs4_readlink *args)
++static void nfs4_xdr_enc_readlink(void *_req, struct xdr_stream *xdr,
++                                void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      const struct nfs4_readlink *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2419,9 +2438,11 @@ static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a READDIR request
+  */
+-static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               const struct nfs4_readdir_arg *args)
++static void nfs4_xdr_enc_readdir(void *_req, struct xdr_stream *xdr,
++                               void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      const struct nfs4_readdir_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2442,9 +2463,11 @@ static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a READ request
+  */
+-static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                            struct nfs_pgio_args *args)
++static void nfs4_xdr_enc_read(void *_req, struct xdr_stream *xdr,
++                            void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs_pgio_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2463,9 +2486,11 @@ static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode an SETATTR request
+  */
+-static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs_setattrargs *args)
++static void nfs4_xdr_enc_setattr(void *_req, struct xdr_stream *xdr,
++                               void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs_setattrargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2481,9 +2506,11 @@ static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a GETACL request
+  */
+-static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              struct nfs_getaclargs *args)
++static void nfs4_xdr_enc_getacl(void *_req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs_getaclargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2504,9 +2531,11 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Encode a WRITE request
+  */
+-static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs_pgio_args *args)
++static void nfs4_xdr_enc_write(void *_req, struct xdr_stream *xdr,
++                             void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs_pgio_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2524,9 +2553,10 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  *  a COMMIT request
+  */
+-static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              struct nfs_commitargs *args)
++static void nfs4_xdr_enc_commit(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      struct nfs_commitargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2541,9 +2571,10 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * FSINFO request
+  */
+-static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              struct nfs4_fsinfo_arg *args)
++static void nfs4_xdr_enc_fsinfo(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      struct nfs4_fsinfo_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2558,9 +2589,10 @@ static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * a PATHCONF request
+  */
+-static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                                const struct nfs4_pathconf_arg *args)
++static void nfs4_xdr_enc_pathconf(void *req, struct xdr_stream *xdr,
++                                void *_args)
+ {
++      const struct nfs4_pathconf_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2576,9 +2608,10 @@ static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * a STATFS request
+  */
+-static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              const struct nfs4_statfs_arg *args)
++static void nfs4_xdr_enc_statfs(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      const struct nfs4_statfs_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2594,10 +2627,11 @@ static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * GETATTR_BITMAP request
+  */
+-static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
++static void nfs4_xdr_enc_server_caps(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs4_server_caps_arg *args)
++                                   void *_args)
+ {
++      struct nfs4_server_caps_arg *args = _args;
+       const u32 *bitmask = args->bitmask;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+@@ -2613,9 +2647,10 @@ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
+ /*
+  * a RENEW request
+  */
+-static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs_client *clp)
++static void nfs4_xdr_enc_renew(void *req, struct xdr_stream *xdr,
++                             void *_clp)
+ {
++      struct nfs_client *clp = _clp;
+       struct compound_hdr hdr = {
+               .nops   = 0,
+       };
+@@ -2628,9 +2663,9 @@ static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * a SETCLIENTID request
+  */
+-static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
++static void nfs4_xdr_enc_setclientid(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs4_setclientid *sc)
++                                   void *sc)
+ {
+       struct compound_hdr hdr = {
+               .nops   = 0,
+@@ -2644,9 +2679,9 @@ static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
+ /*
+  * a SETCLIENTID_CONFIRM request
+  */
+-static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
++static void nfs4_xdr_enc_setclientid_confirm(void *req,
+                                            struct xdr_stream *xdr,
+-                                           struct nfs4_setclientid_res *arg)
++                                           void *arg)
+ {
+       struct compound_hdr hdr = {
+               .nops   = 0,
+@@ -2660,10 +2695,11 @@ static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
+ /*
+  * DELEGRETURN request
+  */
+-static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
++static void nfs4_xdr_enc_delegreturn(void *req,
+                                    struct xdr_stream *xdr,
+-                                   const struct nfs4_delegreturnargs *args)
++                                   void *_args)
+ {
++      const struct nfs4_delegreturnargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2679,10 +2715,12 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
+ /*
+  * Encode FS_LOCATIONS request
+  */
+-static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
++static void nfs4_xdr_enc_fs_locations(void *_req,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs4_fs_locations_arg *args)
++                                    void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs4_fs_locations_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2712,10 +2750,11 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
+ /*
+  * Encode SECINFO request
+  */
+-static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
++static void nfs4_xdr_enc_secinfo(void *req,
+                               struct xdr_stream *xdr,
+-                              struct nfs4_secinfo_arg *args)
++                              void *_args)
+ {
++      struct nfs4_secinfo_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2730,10 +2769,11 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
+ /*
+  * Encode FSID_PRESENT request
+  */
+-static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
++static void nfs4_xdr_enc_fsid_present(void *req,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs4_fsid_present_arg *args)
++                                    void *_args)
+ {
++      struct nfs4_fsid_present_arg *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2751,10 +2791,11 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
+ /*
+  * BIND_CONN_TO_SESSION request
+  */
+-static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
++static void nfs4_xdr_enc_bind_conn_to_session(void *req,
+                               struct xdr_stream *xdr,
+-                              struct nfs41_bind_conn_to_session_args *args)
++                              void *_args)
+ {
++      struct nfs41_bind_conn_to_session_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = args->client->cl_mvops->minor_version,
+       };
+@@ -2767,10 +2808,11 @@ static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
+ /*
+  * EXCHANGE_ID request
+  */
+-static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
++static void nfs4_xdr_enc_exchange_id(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs41_exchange_id_args *args)
++                                   void *_args)
+ {
++      struct nfs41_exchange_id_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = args->client->cl_mvops->minor_version,
+       };
+@@ -2783,10 +2825,11 @@ static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
+ /*
+  * a CREATE_SESSION request
+  */
+-static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
++static void nfs4_xdr_enc_create_session(void *req,
+                                       struct xdr_stream *xdr,
+-                                      struct nfs41_create_session_args *args)
++                                      void *_args)
+ {
++      struct nfs41_create_session_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = args->client->cl_mvops->minor_version,
+       };
+@@ -2799,10 +2842,11 @@ static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
+ /*
+  * a DESTROY_SESSION request
+  */
+-static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
++static void nfs4_xdr_enc_destroy_session(void *req,
+                                        struct xdr_stream *xdr,
+-                                       struct nfs4_session *session)
++                                       void *_session)
+ {
++      struct nfs4_session *session = _session;
+       struct compound_hdr hdr = {
+               .minorversion = session->clp->cl_mvops->minor_version,
+       };
+@@ -2815,10 +2859,11 @@ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
+ /*
+  * a DESTROY_CLIENTID request
+  */
+-static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
++static void nfs4_xdr_enc_destroy_clientid(void *req,
+                                        struct xdr_stream *xdr,
+-                                       struct nfs_client *clp)
++                                       void *_clp)
+ {
++      struct nfs_client *clp = _clp;
+       struct compound_hdr hdr = {
+               .minorversion = clp->cl_mvops->minor_version,
+       };
+@@ -2831,8 +2876,8 @@ static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
+ /*
+  * a SEQUENCE request
+  */
+-static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                                struct nfs4_sequence_args *args)
++static void nfs4_xdr_enc_sequence(void *req, struct xdr_stream *xdr,
++                                void *args)
+ {
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(args),
+@@ -2846,10 +2891,11 @@ static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * a GET_LEASE_TIME request
+  */
+-static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
++static void nfs4_xdr_enc_get_lease_time(void *req,
+                                       struct xdr_stream *xdr,
+-                                      struct nfs4_get_lease_time_args *args)
++                                      void *_args)
+ {
++      struct nfs4_get_lease_time_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
+       };
+@@ -2865,10 +2911,11 @@ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
+ /*
+  * a RECLAIM_COMPLETE request
+  */
+-static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
++static void nfs4_xdr_enc_reclaim_complete(void *req,
+                                         struct xdr_stream *xdr,
+-                              struct nfs41_reclaim_complete_args *args)
++                                        void *_args)
+ {
++      struct nfs41_reclaim_complete_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args)
+       };
+@@ -2882,10 +2929,12 @@ static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
+ /*
+  * Encode GETDEVICEINFO request
+  */
+-static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
++static void nfs4_xdr_enc_getdeviceinfo(void *_req,
+                                      struct xdr_stream *xdr,
+-                                     struct nfs4_getdeviceinfo_args *args)
++                                     void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs4_getdeviceinfo_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2906,10 +2955,12 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
+ /*
+  *  Encode LAYOUTGET request
+  */
+-static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
++static void nfs4_xdr_enc_layoutget(void *_req,
+                                  struct xdr_stream *xdr,
+-                                 struct nfs4_layoutget_args *args)
++                                 void *_args)
+ {
++      struct rpc_rqst *req = _req;
++      struct nfs4_layoutget_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2928,10 +2979,11 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
+ /*
+  *  Encode LAYOUTCOMMIT request
+  */
+-static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
++static void nfs4_xdr_enc_layoutcommit(void *req,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs4_layoutcommit_args *args)
++                                    void *_args)
+ {
++      struct nfs4_layoutcommit_args *args = _args;
+       struct nfs4_layoutcommit_data *data =
+               container_of(args, struct nfs4_layoutcommit_data, args);
+       struct compound_hdr hdr = {
+@@ -2949,10 +3001,11 @@ static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
+ /*
+  * Encode LAYOUTRETURN request
+  */
+-static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
++static void nfs4_xdr_enc_layoutreturn(void *req,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs4_layoutreturn_args *args)
++                                    void *_args)
+ {
++      struct nfs4_layoutreturn_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2967,10 +3020,11 @@ static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
+ /*
+  * Encode SECINFO_NO_NAME request
+  */
+-static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
++static void nfs4_xdr_enc_secinfo_no_name(void *req,
+                                       struct xdr_stream *xdr,
+-                                      struct nfs41_secinfo_no_name_args *args)
++                                      void *_args)
+ {
++      struct nfs41_secinfo_no_name_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -2980,16 +3034,16 @@ static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
+       encode_putrootfh(xdr, &hdr);
+       encode_secinfo_no_name(xdr, args, &hdr);
+       encode_nops(&hdr);
+-      return 0;
+ }
+ /*
+  *  Encode TEST_STATEID request
+  */
+-static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
++static void nfs4_xdr_enc_test_stateid(void *req,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs41_test_stateid_args *args)
++                                    void *_args)
+ {
++      struct nfs41_test_stateid_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -3003,10 +3057,11 @@ static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
+ /*
+  *  Encode FREE_STATEID request
+  */
+-static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
++static void nfs4_xdr_enc_free_stateid(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs41_free_stateid_args *args)
++                                   void *_args)
+ {
++      struct nfs41_free_stateid_args *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -6096,10 +6151,11 @@ static int decode_free_stateid(struct xdr_stream *xdr,
+ /*
+  * Decode OPEN_DOWNGRADE response
+  */
+-static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_open_downgrade(void *rqstp,
+                                      struct xdr_stream *xdr,
+-                                     struct nfs_closeres *res)
++                                     void *_res)
+ {
++      struct nfs_closeres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6123,9 +6179,10 @@ out:
+ /*
+  * Decode ACCESS response
+  */
+-static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                             struct nfs4_accessres *res)
++static int nfs4_xdr_dec_access(void *rqstp, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs4_accessres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6149,9 +6206,10 @@ out:
+ /*
+  * Decode LOOKUP response
+  */
+-static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                             struct nfs4_lookup_res *res)
++static int nfs4_xdr_dec_lookup(void *rqstp, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs4_lookup_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6178,10 +6236,11 @@ out:
+ /*
+  * Decode LOOKUP_ROOT response
+  */
+-static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_lookup_root(void *rqstp,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs4_lookup_res *res)
++                                  void *_res)
+ {
++      struct nfs4_lookup_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6205,9 +6264,10 @@ out:
+ /*
+  * Decode REMOVE response
+  */
+-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                             struct nfs_removeres *res)
++static int nfs4_xdr_dec_remove(void *rqstp, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs_removeres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6228,9 +6288,10 @@ out:
+ /*
+  * Decode RENAME response
+  */
+-static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                             struct nfs_renameres *res)
++static int nfs4_xdr_dec_rename(void *rqstp, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs_renameres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6257,9 +6318,10 @@ out:
+ /*
+  * Decode LINK response
+  */
+-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                           struct nfs4_link_res *res)
++static int nfs4_xdr_dec_link(void *rqstp, struct xdr_stream *xdr,
++                           void *_res)
+ {
++      struct nfs4_link_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6296,9 +6358,10 @@ out:
+ /*
+  * Decode CREATE response
+  */
+-static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                             struct nfs4_create_res *res)
++static int nfs4_xdr_dec_create(void *rqstp, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs4_create_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6325,8 +6388,8 @@ out:
+ /*
+  * Decode SYMLINK response
+  */
+-static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                              struct nfs4_create_res *res)
++static int nfs4_xdr_dec_symlink(void *rqstp, struct xdr_stream *xdr,
++                              void *res)
+ {
+       return nfs4_xdr_dec_create(rqstp, xdr, res);
+ }
+@@ -6334,9 +6397,10 @@ static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ /*
+  * Decode GETATTR response
+  */
+-static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                              struct nfs4_getattr_res *res)
++static int nfs4_xdr_dec_getattr(void *rqstp, struct xdr_stream *xdr,
++                              void *_res)
+ {
++      struct nfs4_getattr_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6357,9 +6421,10 @@ out:
+ /*
+  * Encode an SETACL request
+  */
+-static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              struct nfs_setaclargs *args)
++static void nfs4_xdr_enc_setacl(void *req, struct xdr_stream *xdr,
++                              void *_args)
+ {
++      struct nfs_setaclargs *args = _args;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+@@ -6375,9 +6440,10 @@ static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+  * Decode SETACL response
+  */
+ static int
+-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                  struct nfs_setaclres *res)
++nfs4_xdr_dec_setacl(void *rqstp, struct xdr_stream *xdr,
++                  void *_res)
+ {
++      struct nfs_setaclres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6399,9 +6465,10 @@ out:
+  * Decode GETACL response
+  */
+ static int
+-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                  struct nfs_getaclres *res)
++nfs4_xdr_dec_getacl(void *rqstp, struct xdr_stream *xdr,
++                  void *_res)
+ {
++      struct nfs_getaclres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6427,9 +6494,10 @@ out:
+ /*
+  * Decode CLOSE response
+  */
+-static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                            struct nfs_closeres *res)
++static int nfs4_xdr_dec_close(void *rqstp, struct xdr_stream *xdr,
++                            void *_res)
+ {
++      struct nfs_closeres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6459,9 +6527,10 @@ out:
+ /*
+  * Decode OPEN response
+  */
+-static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                           struct nfs_openres *res)
++static int nfs4_xdr_dec_open(void *rqstp, struct xdr_stream *xdr,
++                           void *_res)
+ {
++      struct nfs_openres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6490,9 +6559,9 @@ out:
+ /*
+  * Decode OPEN_CONFIRM response
+  */
+-static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_open_confirm(void *rqstp,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs_open_confirmres *res)
++                                   void *res)
+ {
+       struct compound_hdr hdr;
+       int status;
+@@ -6511,10 +6580,11 @@ out:
+ /*
+  * Decode OPEN response
+  */
+-static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_open_noattr(void *rqstp,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs_openres *res)
++                                  void *_res)
+ {
++      struct nfs_openres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6540,10 +6610,11 @@ out:
+ /*
+  * Decode SETATTR response
+  */
+-static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_setattr(void *rqstp,
+                               struct xdr_stream *xdr,
+-                              struct nfs_setattrres *res)
++                              void *_res)
+ {
++      struct nfs_setattrres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6567,9 +6638,10 @@ out:
+ /*
+  * Decode LOCK response
+  */
+-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                           struct nfs_lock_res *res)
++static int nfs4_xdr_dec_lock(void *rqstp, struct xdr_stream *xdr,
++                           void *_res)
+ {
++      struct nfs_lock_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6590,9 +6662,10 @@ out:
+ /*
+  * Decode LOCKT response
+  */
+-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                            struct nfs_lockt_res *res)
++static int nfs4_xdr_dec_lockt(void *rqstp, struct xdr_stream *xdr,
++                            void *_res)
+ {
++      struct nfs_lockt_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6613,9 +6686,10 @@ out:
+ /*
+  * Decode LOCKU response
+  */
+-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                            struct nfs_locku_res *res)
++static int nfs4_xdr_dec_locku(void *rqstp, struct xdr_stream *xdr,
++                            void *_res)
+ {
++      struct nfs_locku_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6633,7 +6707,7 @@ out:
+       return status;
+ }
+-static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_release_lockowner(void *rqstp,
+                                         struct xdr_stream *xdr, void *dummy)
+ {
+       struct compound_hdr hdr;
+@@ -6648,10 +6722,11 @@ static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+ /*
+  * Decode READLINK response
+  */
+-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_readlink(void *rqstp,
+                                struct xdr_stream *xdr,
+-                               struct nfs4_readlink_res *res)
++                               void *_res)
+ {
++      struct nfs4_readlink_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6672,9 +6747,10 @@ out:
+ /*
+  * Decode READDIR response
+  */
+-static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                              struct nfs4_readdir_res *res)
++static int nfs4_xdr_dec_readdir(void *rqstp, struct xdr_stream *xdr,
++                              void *_res)
+ {
++      struct nfs4_readdir_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6695,9 +6771,10 @@ out:
+ /*
+  * Decode Read response
+  */
+-static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                           struct nfs_pgio_res *res)
++static int nfs4_xdr_dec_read(void *rqstp, struct xdr_stream *xdr,
++                           void *_res)
+ {
++      struct nfs_pgio_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6721,9 +6798,10 @@ out:
+ /*
+  * Decode WRITE response
+  */
+-static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                            struct nfs_pgio_res *res)
++static int nfs4_xdr_dec_write(void *rqstp, struct xdr_stream *xdr,
++                            void *_res)
+ {
++      struct nfs_pgio_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6751,9 +6829,10 @@ out:
+ /*
+  * Decode COMMIT response
+  */
+-static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+-                             struct nfs_commitres *res)
++static int nfs4_xdr_dec_commit(void *rqstp, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs_commitres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6775,9 +6854,10 @@ out:
+ /*
+  * Decode FSINFO response
+  */
+-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs4_fsinfo_res *res)
++static int nfs4_xdr_dec_fsinfo(void *req, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs4_fsinfo_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6794,9 +6874,10 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Decode PATHCONF response
+  */
+-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               struct nfs4_pathconf_res *res)
++static int nfs4_xdr_dec_pathconf(void *req, struct xdr_stream *xdr,
++                               void *_res)
+ {
++      struct nfs4_pathconf_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6813,9 +6894,10 @@ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Decode STATFS response
+  */
+-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                             struct nfs4_statfs_res *res)
++static int nfs4_xdr_dec_statfs(void *req, struct xdr_stream *xdr,
++                             void *_res)
+ {
++      struct nfs4_statfs_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6832,10 +6914,11 @@ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * Decode GETATTR_BITMAP response
+  */
+-static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
++static int nfs4_xdr_dec_server_caps(void *req,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs4_server_caps_res *res)
++                                  void *_res)
+ {
++      struct nfs4_server_caps_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6856,7 +6939,7 @@ out:
+ /*
+  * Decode RENEW response
+  */
+-static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
++static int nfs4_xdr_dec_renew(void *rqstp, struct xdr_stream *xdr,
+                             void *__unused)
+ {
+       struct compound_hdr hdr;
+@@ -6871,9 +6954,9 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ /*
+  * Decode SETCLIENTID response
+  */
+-static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
++static int nfs4_xdr_dec_setclientid(void *req,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs4_setclientid_res *res)
++                                  void *res)
+ {
+       struct compound_hdr hdr;
+       int status;
+@@ -6887,8 +6970,9 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+ /*
+  * Decode SETCLIENTID_CONFIRM response
+  */
+-static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+-                                          struct xdr_stream *xdr)
++static int nfs4_xdr_dec_setclientid_confirm(void *req,
++                                          struct xdr_stream *xdr,
++                                          void *res)
+ {
+       struct compound_hdr hdr;
+       int status;
+@@ -6902,10 +6986,11 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+ /*
+  * Decode DELEGRETURN response
+  */
+-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_delegreturn(void *rqstp,
+                                   struct xdr_stream *xdr,
+-                                  struct nfs4_delegreturnres *res)
++                                  void *_res)
+ {
++      struct nfs4_delegreturnres *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6929,10 +7014,11 @@ out:
+ /*
+  * Decode FS_LOCATIONS response
+  */
+-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
++static int nfs4_xdr_dec_fs_locations(void *req,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs4_fs_locations_res *res)
++                                   void *_res)
+ {
++      struct nfs4_fs_locations_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6972,10 +7058,11 @@ out:
+ /*
+  * Decode SECINFO response
+  */
+-static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_secinfo(void *rqstp,
+                               struct xdr_stream *xdr,
+-                              struct nfs4_secinfo_res *res)
++                              void *_res)
+ {
++      struct nfs4_secinfo_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -6996,10 +7083,11 @@ out:
+ /*
+  * Decode FSID_PRESENT response
+  */
+-static int nfs4_xdr_dec_fsid_present(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_fsid_present(void *rqstp,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs4_fsid_present_res *res)
++                                   void *_res)
+ {
++      struct nfs4_fsid_present_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7025,7 +7113,7 @@ out:
+ /*
+  * Decode BIND_CONN_TO_SESSION response
+  */
+-static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_bind_conn_to_session(void *rqstp,
+                                       struct xdr_stream *xdr,
+                                       void *res)
+ {
+@@ -7041,7 +7129,7 @@ static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
+ /*
+  * Decode EXCHANGE_ID response
+  */
+-static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_exchange_id(void *rqstp,
+                                   struct xdr_stream *xdr,
+                                   void *res)
+ {
+@@ -7057,9 +7145,9 @@ static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+ /*
+  * Decode CREATE_SESSION response
+  */
+-static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_create_session(void *rqstp,
+                                      struct xdr_stream *xdr,
+-                                     struct nfs41_create_session_res *res)
++                                     void *res)
+ {
+       struct compound_hdr hdr;
+       int status;
+@@ -7073,7 +7161,7 @@ static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+ /*
+  * Decode DESTROY_SESSION response
+  */
+-static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_destroy_session(void *rqstp,
+                                       struct xdr_stream *xdr,
+                                       void *res)
+ {
+@@ -7089,7 +7177,7 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+ /*
+  * Decode DESTROY_CLIENTID response
+  */
+-static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_destroy_clientid(void *rqstp,
+                                       struct xdr_stream *xdr,
+                                       void *res)
+ {
+@@ -7105,9 +7193,9 @@ static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
+ /*
+  * Decode SEQUENCE response
+  */
+-static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_sequence(void *rqstp,
+                                struct xdr_stream *xdr,
+-                               struct nfs4_sequence_res *res)
++                               void *res)
+ {
+       struct compound_hdr hdr;
+       int status;
+@@ -7121,10 +7209,11 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+ /*
+  * Decode GET_LEASE_TIME response
+  */
+-static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_get_lease_time(void *rqstp,
+                                      struct xdr_stream *xdr,
+-                                     struct nfs4_get_lease_time_res *res)
++                                     void *_res)
+ {
++      struct nfs4_get_lease_time_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7141,10 +7230,11 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+ /*
+  * Decode RECLAIM_COMPLETE response
+  */
+-static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_reclaim_complete(void *rqstp,
+                                        struct xdr_stream *xdr,
+-                                       struct nfs41_reclaim_complete_res *res)
++                                       void *_res)
+ {
++      struct nfs41_reclaim_complete_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7159,10 +7249,11 @@ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+ /*
+  * Decode GETDEVINFO response
+  */
+-static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_getdeviceinfo(void *rqstp,
+                                     struct xdr_stream *xdr,
+-                                    struct nfs4_getdeviceinfo_res *res)
++                                    void *_res)
+ {
++      struct nfs4_getdeviceinfo_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7180,10 +7271,11 @@ out:
+ /*
+  * Decode LAYOUTGET response
+  */
+-static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_layoutget(void *rqstp,
+                                 struct xdr_stream *xdr,
+-                                struct nfs4_layoutget_res *res)
++                                void *_res)
+ {
++      struct nfs4_layoutget_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7204,10 +7296,11 @@ out:
+ /*
+  * Decode LAYOUTRETURN response
+  */
+-static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_layoutreturn(void *rqstp,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs4_layoutreturn_res *res)
++                                   void *_res)
+ {
++      struct nfs4_layoutreturn_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7228,10 +7321,11 @@ out:
+ /*
+  * Decode LAYOUTCOMMIT response
+  */
+-static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_layoutcommit(void *rqstp,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs4_layoutcommit_res *res)
++                                   void *_res)
+ {
++      struct nfs4_layoutcommit_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7255,10 +7349,11 @@ out:
+ /*
+  * Decode SECINFO_NO_NAME response
+  */
+-static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_secinfo_no_name(void *rqstp,
+                                       struct xdr_stream *xdr,
+-                                      struct nfs4_secinfo_res *res)
++                                      void *_res)
+ {
++      struct nfs4_secinfo_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7279,10 +7374,11 @@ out:
+ /*
+  * Decode TEST_STATEID response
+  */
+-static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_test_stateid(void *rqstp,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs41_test_stateid_res *res)
++                                   void *_res)
+ {
++      struct nfs41_test_stateid_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7300,10 +7396,11 @@ out:
+ /*
+  * Decode FREE_STATEID response
+  */
+-static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_free_stateid(void *rqstp,
+                                    struct xdr_stream *xdr,
+-                                   struct nfs41_free_stateid_res *res)
++                                   void *_res)
+ {
++      struct nfs41_free_stateid_res *res = _res;
+       struct compound_hdr hdr;
+       int status;
+@@ -7468,8 +7565,8 @@ nfs4_stat_to_errno(int stat)
+ #define PROC(proc, argtype, restype)                          \
+ [NFSPROC4_CLNT_##proc] = {                                    \
+       .p_proc   = NFSPROC4_COMPOUND,                          \
+-      .p_encode = (kxdreproc_t)nfs4_xdr_##argtype,            \
+-      .p_decode = (kxdrdproc_t)nfs4_xdr_##restype,            \
++      .p_encode = nfs4_xdr_##argtype,                         \
++      .p_decode = nfs4_xdr_##restype,                         \
+       .p_arglen = NFS4_##argtype##_sz,                        \
+       .p_replen = NFS4_##restype##_sz,                        \
+       .p_statidx = NFSPROC4_CLNT_##proc,                      \
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 572e5b3..5245a0a 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -346,7 +346,7 @@ struct nfs_readdesc {
+ };
+ static int
+-readpage_async_filler(void *data, struct page *page)
++readpage_async_filler(struct file *data, struct page *page)
+ {
+       struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
+       struct nfs_page *new;
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 4fe3eea..8922b2b 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -25,9 +25,10 @@
+  * and straight-forward than readdir caching.
+  */
+-static int nfs_symlink_filler(struct inode *inode, struct page *page)
++static int nfs_symlink_filler(struct file *_inode, struct page *page)
+ {
+       int error;
++      struct inode *inode = (struct inode *)_inode;
+       error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
+       if (error < 0)
+@@ -64,8 +65,7 @@ static const char *nfs_get_link(struct dentry *dentry,
+               err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
+               if (err)
+                       return err;
+-              page = read_cache_page(&inode->i_data, 0,
+-                                      (filler_t *)nfs_symlink_filler, inode);
++              page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler, inode);
+               if (IS_ERR(page))
+                       return ERR_CAST(page);
+       }
+diff --git a/fs/nfsd/current_stateid.h b/fs/nfsd/current_stateid.h
+index 4123551..813b403 100644
+--- a/fs/nfsd/current_stateid.h
++++ b/fs/nfsd/current_stateid.h
+@@ -8,21 +8,21 @@ extern void clear_current_stateid(struct nfsd4_compound_state *cstate);
+ /*
+  * functions to set current state id
+  */
+-extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
+-extern void nfsd4_set_openstateid(struct nfsd4_compound_state *, struct nfsd4_open *);
+-extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *, struct nfsd4_lock *);
+-extern void nfsd4_set_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
++extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, void *);
++extern void nfsd4_set_openstateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_set_closestateid(struct nfsd4_compound_state *, void *);
+ /*
+  * functions to consume current state id
+  */
+-extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
+-extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *, struct nfsd4_delegreturn *);
+-extern void nfsd4_get_freestateid(struct nfsd4_compound_state *, struct nfsd4_free_stateid *);
+-extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *, struct nfsd4_setattr *);
+-extern void nfsd4_get_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
+-extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *, struct nfsd4_locku *);
+-extern void nfsd4_get_readstateid(struct nfsd4_compound_state *, struct nfsd4_read *);
+-extern void nfsd4_get_writestateid(struct nfsd4_compound_state *, struct nfsd4_write *);
++extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, void *);
++extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_get_freestateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_get_closestateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_get_readstateid(struct nfsd4_compound_state *, void *);
++extern void nfsd4_get_writestateid(struct nfsd4_compound_state *, void *);
+ #endif   /* _NFSD4_CURRENT_STATE_H */
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index d08cd88..5d74e50 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -27,9 +27,10 @@ nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+ /*
+  * Get the Access and/or Default ACL of a file.
+  */
+-static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
+-              struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
++static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_getaclargs *argp = _argp;
++      struct nfsd3_getaclres *resp = _resp;
+       struct posix_acl *acl;
+       struct inode *inode;
+       svc_fh *fh;
+@@ -87,10 +88,10 @@ fail:
+ /*
+  * Set the Access and/or Default ACL of a file.
+  */
+-static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
+-              struct nfsd3_setaclargs *argp,
+-              struct nfsd_attrstat *resp)
++static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_setaclargs *argp = _argp;
++      struct nfsd_attrstat *resp = _resp;
+       struct inode *inode;
+       svc_fh *fh;
+       __be32 nfserr = 0;
+@@ -141,9 +142,10 @@ out_errno:
+ /*
+  * Check file attributes
+  */
+-static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
+-              struct nfsd_fhandle *argp, struct nfsd_attrstat *resp)
++static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++      struct nfsd_attrstat *resp = _resp;
+       __be32 nfserr;
+       dprintk("nfsd: GETATTR  %s\n", SVCFH_fmt(&argp->fh));
+@@ -158,9 +160,10 @@ static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
+ /*
+  * Check file access
+  */
+-static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
+-              struct nfsd3_accessres *resp)
++static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_accessargs *argp = _argp;
++      struct nfsd3_accessres *resp = _resp;
+       __be32 nfserr;
+       dprintk("nfsd: ACCESS(2acl)   %s 0x%x\n",
+@@ -179,9 +182,10 @@ static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessarg
+ /*
+  * XDR decode functions
+  */
+-static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_getaclargs *argp)
++static int nfsaclsvc_decode_getaclargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nfsd3_getaclargs *argp = _argp;
++
+       p = nfs2svc_decode_fh(p, &argp->fh);
+       if (!p)
+               return 0;
+@@ -191,9 +195,10 @@ static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+-static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_setaclargs *argp)
++static int nfsaclsvc_decode_setaclargs(void *_rqstp, __be32 *p, void *_argp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_setaclargs *argp = _argp;
+       struct kvec *head = rqstp->rq_arg.head;
+       unsigned int base;
+       int n;
+@@ -217,18 +222,20 @@ static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
+       return (n > 0);
+ }
+-static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd_fhandle *argp)
++static int nfsaclsvc_decode_fhandleargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++
+       p = nfs2svc_decode_fh(p, &argp->fh);
+       if (!p)
+               return 0;
+       return xdr_argsize_check(rqstp, p);
+ }
+-static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_accessargs *argp)
++static int nfsaclsvc_decode_accessargs(void *rqstp, __be32 *p, void *_argp)
+ {
++      struct nfsd3_accessargs *argp = _argp;
++
+       p = nfs2svc_decode_fh(p, &argp->fh);
+       if (!p)
+               return 0;
+@@ -245,15 +252,16 @@ static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
+  * There must be an encoding function for void results so svc_process
+  * will work properly.
+  */
+-static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++static int nfsaclsvc_encode_voidres(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_ressize_check(rqstp, p);
+ }
+ /* GETACL */
+-static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_getaclres *resp)
++static int nfsaclsvc_encode_getaclres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_getaclres *resp = _resp;
+       struct dentry *dentry = resp->fh.fh_dentry;
+       struct inode *inode;
+       struct kvec *head = rqstp->rq_res.head;
+@@ -296,17 +304,19 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
+       return (n > 0);
+ }
+-static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd_attrstat *resp)
++static int nfsaclsvc_encode_attrstatres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd_attrstat *resp = _resp;
++
+       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
+       return xdr_ressize_check(rqstp, p);
+ }
+ /* ACCESS */
+-static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_accessres *resp)
++static int nfsaclsvc_encode_accessres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_accessres *resp = _resp;
++
+       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
+       *p++ = htonl(resp->access);
+       return xdr_ressize_check(rqstp, p);
+@@ -315,27 +325,30 @@ static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
+ /*
+  * XDR release functions
+  */
+-static int nfsaclsvc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_getaclres *resp)
++static int nfsaclsvc_release_getacl(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_getaclres *resp = _resp;
++
+       fh_put(&resp->fh);
+       posix_acl_release(resp->acl_access);
+       posix_acl_release(resp->acl_default);
+       return 1;
+ }
+-static int nfsaclsvc_release_attrstat(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd_attrstat *resp)
++static int nfsaclsvc_release_attrstat(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd_attrstat *resp = _resp;
++
+       fh_put(&resp->fh);
+       return 1;
+ }
+-static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
+-               struct nfsd3_accessres *resp)
++static int nfsaclsvc_release_access(void *rqstp, __be32 *p, void *_resp)
+ {
+-       fh_put(&resp->fh);
+-       return 1;
++      struct nfsd3_accessres *resp = _resp;
++
++      fh_put(&resp->fh);
++      return 1;
+ }
+ #define nfsaclsvc_decode_voidargs     NULL
+@@ -346,10 +359,10 @@ static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
+ struct nfsd3_voidargs { int dummy; };
+ #define PROC(name, argt, rest, relt, cache, respsize) \
+- { (svc_procfunc) nfsacld_proc_##name,                \
+-   (kxdrproc_t) nfsaclsvc_decode_##argt##args,        \
+-   (kxdrproc_t) nfsaclsvc_encode_##rest##res, \
+-   (kxdrproc_t) nfsaclsvc_release_##relt,             \
++ { nfsacld_proc_##name,                               \
++   nfsaclsvc_decode_##argt##args,             \
++   nfsaclsvc_encode_##rest##res,              \
++   nfsaclsvc_release_##relt,                  \
+    sizeof(struct nfsd3_##argt##args),         \
+    sizeof(struct nfsd3_##rest##res),          \
+    0,                                         \
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 0c89034..36a8d76 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -26,9 +26,10 @@ nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+ /*
+  * Get the Access and/or Default ACL of a file.
+  */
+-static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp,
+-              struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
++static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_getaclargs *argp = _argp;
++      struct nfsd3_getaclres *resp = _resp;
+       struct posix_acl *acl;
+       struct inode *inode;
+       svc_fh *fh;
+@@ -80,10 +81,10 @@ fail:
+ /*
+  * Set the Access and/or Default ACL of a file.
+  */
+-static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
+-              struct nfsd3_setaclargs *argp,
+-              struct nfsd3_attrstat *resp)
++static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_setaclargs *argp = _argp;
++      struct nfsd3_attrstat *resp = _resp;
+       struct inode *inode;
+       svc_fh *fh;
+       __be32 nfserr = 0;
+@@ -123,9 +124,10 @@ out:
+ /*
+  * XDR decode functions
+  */
+-static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_getaclargs *args)
++static int nfs3svc_decode_getaclargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_getaclargs *args = _args;
++
+       p = nfs3svc_decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -135,9 +137,10 @@ static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+-static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_setaclargs *args)
++static int nfs3svc_decode_setaclargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_setaclargs *args = _args;
+       struct kvec *head = rqstp->rq_arg.head;
+       unsigned int base;
+       int n;
+@@ -166,9 +169,10 @@ static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
+  */
+ /* GETACL */
+-static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_getaclres *resp)
++static int nfs3svc_encode_getaclres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_getaclres *resp = _resp;
+       struct dentry *dentry = resp->fh.fh_dentry;
+       p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
+@@ -211,9 +215,10 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
+ }
+ /* SETACL */
+-static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_attrstat *resp)
++static int nfs3svc_encode_setaclres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_attrstat *resp = _resp;
++
+       p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
+       return xdr_ressize_check(rqstp, p);
+@@ -222,9 +227,10 @@ static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
+ /*
+  * XDR release functions
+  */
+-static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
+-              struct nfsd3_getaclres *resp)
++static int nfs3svc_release_getacl(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_getaclres *resp = _resp;
++
+       fh_put(&resp->fh);
+       posix_acl_release(resp->acl_access);
+       posix_acl_release(resp->acl_default);
+@@ -238,10 +244,10 @@ static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
+ struct nfsd3_voidargs { int dummy; };
+ #define PROC(name, argt, rest, relt, cache, respsize) \
+- { (svc_procfunc) nfsd3_proc_##name,          \
+-   (kxdrproc_t) nfs3svc_decode_##argt##args,  \
+-   (kxdrproc_t) nfs3svc_encode_##rest##res,   \
+-   (kxdrproc_t) nfs3svc_release_##relt,               \
++ { nfsd3_proc_##name,                         \
++   nfs3svc_decode_##argt##args,                       \
++   nfs3svc_encode_##rest##res,                        \
++   nfs3svc_release_##relt,                    \
+    sizeof(struct nfsd3_##argt##args),         \
+    sizeof(struct nfsd3_##rest##res),          \
+    0,                                         \
+diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
+index d818e4f..bb15590 100644
+--- a/fs/nfsd/nfs3proc.c
++++ b/fs/nfsd/nfs3proc.c
+@@ -40,9 +40,10 @@ nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
+  * Get a file's attributes
+  */
+ static __be32
+-nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
+-                                         struct nfsd3_attrstat *resp)
++nfsd3_proc_getattr(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++      struct nfsd3_attrstat *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: GETATTR(3)  %s\n",
+@@ -63,9 +64,10 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
+  * Set a file's attributes
+  */
+ static __be32
+-nfsd3_proc_setattr(struct svc_rqst *rqstp, struct nfsd3_sattrargs *argp,
+-                                         struct nfsd3_attrstat  *resp)
++nfsd3_proc_setattr(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_sattrargs *argp = _argp;
++      struct nfsd3_attrstat *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: SETATTR(3)  %s\n",
+@@ -81,9 +83,10 @@ nfsd3_proc_setattr(struct svc_rqst *rqstp, struct nfsd3_sattrargs *argp,
+  * Look up a path name component
+  */
+ static __be32
+-nfsd3_proc_lookup(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+-                                        struct nfsd3_diropres  *resp)
++nfsd3_proc_lookup(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_diropargs *argp = _argp;
++      struct nfsd3_diropres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: LOOKUP(3)   %s %.*s\n",
+@@ -105,9 +108,10 @@ nfsd3_proc_lookup(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+  * Check file access
+  */
+ static __be32
+-nfsd3_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
+-                                        struct nfsd3_accessres *resp)
++nfsd3_proc_access(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_accessargs *argp = _argp;
++      struct nfsd3_accessres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: ACCESS(3)   %s 0x%x\n",
+@@ -124,9 +128,10 @@ nfsd3_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
+  * Read a symlink.
+  */
+ static __be32
+-nfsd3_proc_readlink(struct svc_rqst *rqstp, struct nfsd3_readlinkargs *argp,
+-                                         struct nfsd3_readlinkres *resp)
++nfsd3_proc_readlink(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_readlinkargs *argp = _argp;
++      struct nfsd3_readlinkres *resp = _resp;
+       __be32 nfserr;
+       dprintk("nfsd: READLINK(3) %s\n", SVCFH_fmt(&argp->fh));
+@@ -142,9 +147,10 @@ nfsd3_proc_readlink(struct svc_rqst *rqstp, struct nfsd3_readlinkargs *argp,
+  * Read a portion of a file.
+  */
+ static __be32
+-nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
+-                                      struct nfsd3_readres  *resp)
++nfsd3_proc_read(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_readargs *argp = _argp;
++      struct nfsd3_readres  *resp = _resp;
+       __be32  nfserr;
+       u32     max_blocksize = svc_max_payload(rqstp);
+       unsigned long cnt = min(argp->count, max_blocksize);
+@@ -179,9 +185,10 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
+  * Write data to a file
+  */
+ static __be32
+-nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
+-                                       struct nfsd3_writeres  *resp)
++nfsd3_proc_write(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_writeargs *argp = _argp;
++      struct nfsd3_writeres *resp = _resp;
+       __be32  nfserr;
+       unsigned long cnt = argp->len;
+@@ -208,9 +215,10 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
+  * first reports about SunOS compatibility problems start to pour in...
+  */
+ static __be32
+-nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
+-                                        struct nfsd3_diropres   *resp)
++nfsd3_proc_create(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_createargs *argp = _argp;
++      struct nfsd3_diropres *resp = _resp;
+       svc_fh          *dirfhp, *newfhp = NULL;
+       struct iattr    *attr;
+       __be32          nfserr;
+@@ -245,9 +253,10 @@ nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
+  * Make directory. This operation is not idempotent.
+  */
+ static __be32
+-nfsd3_proc_mkdir(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
+-                                       struct nfsd3_diropres   *resp)
++nfsd3_proc_mkdir(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_createargs *argp = _argp;
++      struct nfsd3_diropres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: MKDIR(3)    %s %.*s\n",
+@@ -265,9 +274,10 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
+ }
+ static __be32
+-nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp,
+-                                         struct nfsd3_diropres    *resp)
++nfsd3_proc_symlink(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_symlinkargs *argp = _argp;
++      struct nfsd3_diropres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: SYMLINK(3)  %s %.*s -> %.*s\n",
+@@ -286,9 +296,10 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp,
+  * Make socket/fifo/device.
+  */
+ static __be32
+-nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp,
+-                                       struct nfsd3_diropres  *resp)
++nfsd3_proc_mknod(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_mknodargs *argp = _argp;
++      struct nfsd3_diropres *resp = _resp;
+       __be32  nfserr;
+       int type;
+       dev_t   rdev = 0;
+@@ -323,9 +334,10 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp,
+  * Remove file/fifo/socket etc.
+  */
+ static __be32
+-nfsd3_proc_remove(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+-                                        struct nfsd3_attrstat  *resp)
++nfsd3_proc_remove(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_diropargs *argp = _argp;
++      struct nfsd3_attrstat *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: REMOVE(3)   %s %.*s\n",
+@@ -344,9 +356,10 @@ nfsd3_proc_remove(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+  * Remove a directory
+  */
+ static __be32
+-nfsd3_proc_rmdir(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+-                                       struct nfsd3_attrstat  *resp)
++nfsd3_proc_rmdir(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_diropargs *argp = _argp;
++      struct nfsd3_attrstat *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: RMDIR(3)    %s %.*s\n",
+@@ -361,9 +374,10 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp,
+ }
+ static __be32
+-nfsd3_proc_rename(struct svc_rqst *rqstp, struct nfsd3_renameargs *argp,
+-                                        struct nfsd3_renameres  *resp)
++nfsd3_proc_rename(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_renameargs *argp = _argp;
++      struct nfsd3_renameres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: RENAME(3)   %s %.*s ->\n",
+@@ -383,9 +397,10 @@ nfsd3_proc_rename(struct svc_rqst *rqstp, struct nfsd3_renameargs *argp,
+ }
+ static __be32
+-nfsd3_proc_link(struct svc_rqst *rqstp, struct nfsd3_linkargs *argp,
+-                                      struct nfsd3_linkres  *resp)
++nfsd3_proc_link(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_linkargs *argp = _argp;
++      struct nfsd3_linkres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: LINK(3)     %s ->\n",
+@@ -406,9 +421,10 @@ nfsd3_proc_link(struct svc_rqst *rqstp, struct nfsd3_linkargs *argp,
+  * Read a portion of a directory.
+  */
+ static __be32
+-nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+-                                         struct nfsd3_readdirres  *resp)
++nfsd3_proc_readdir(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_readdirargs *argp = _argp;
++      struct nfsd3_readdirres  *resp = _resp;
+       __be32          nfserr;
+       int             count;
+@@ -442,9 +458,10 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+  * For now, we choose to ignore the dircount parameter.
+  */
+ static __be32
+-nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+-                                             struct nfsd3_readdirres  *resp)
++nfsd3_proc_readdirplus(struct svc_rqst *rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_readdirargs *argp = _argp;
++      struct nfsd3_readdirres  *resp = _resp;
+       __be32  nfserr;
+       int     count = 0;
+       loff_t  offset;
+@@ -509,9 +526,10 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+  * Get file system stats
+  */
+ static __be32
+-nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle    *argp,
+-                                         struct nfsd3_fsstatres *resp)
++nfsd3_proc_fsstat(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++      struct nfsd3_fsstatres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: FSSTAT(3)   %s\n",
+@@ -526,9 +544,10 @@ nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle    *argp,
+  * Get file system info
+  */
+ static __be32
+-nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle    *argp,
+-                                         struct nfsd3_fsinfores *resp)
++nfsd3_proc_fsinfo(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++      struct nfsd3_fsinfores *resp = _resp;
+       __be32  nfserr;
+       u32     max_blocksize = svc_max_payload(rqstp);
+@@ -569,9 +588,10 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle    *argp,
+  * Get pathconf info for the specified file
+  */
+ static __be32
+-nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle      *argp,
+-                                           struct nfsd3_pathconfres *resp)
++nfsd3_proc_pathconf(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd_fhandle *argp =  _argp;
++      struct nfsd3_pathconfres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: PATHCONF(3) %s\n",
+@@ -612,9 +632,10 @@ nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle      *argp,
+  * Commit a file (range) to stable storage.
+  */
+ static __be32
+-nfsd3_proc_commit(struct svc_rqst * rqstp, struct nfsd3_commitargs *argp,
+-                                         struct nfsd3_commitres  *resp)
++nfsd3_proc_commit(struct svc_rqst * rqstp, void *_argp, void *_resp)
+ {
++      struct nfsd3_commitargs *argp = _argp;
++      struct nfsd3_commitres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: COMMIT(3)   %s %u@%Lu\n",
+@@ -669,213 +690,213 @@ struct nfsd3_voidargs { int dummy; };
+ static struct svc_procedure           nfsd_procedures3[22] = {
+       [NFS3PROC_NULL] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_null,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_voidres,
++              .pc_func = nfsd3_proc_null,
++              .pc_encode = nfs3svc_encode_voidres,
+               .pc_argsize = sizeof(struct nfsd3_voidargs),
+               .pc_ressize = sizeof(struct nfsd3_voidres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST,
+       },
+       [NFS3PROC_GETATTR] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_getattr,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_attrstatres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_getattr,
++              .pc_decode = nfs3svc_decode_fhandleargs,
++              .pc_encode = nfs3svc_encode_attrstatres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+               .pc_ressize = sizeof(struct nfsd3_attrstatres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+AT,
+       },
+       [NFS3PROC_SETATTR] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_setattr,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_sattrargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_setattr,
++              .pc_decode = nfs3svc_decode_sattrargs,
++              .pc_encode = nfs3svc_encode_wccstatres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_sattrargs),
+               .pc_ressize = sizeof(struct nfsd3_wccstatres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+WC,
+       },
+       [NFS3PROC_LOOKUP] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_lookup,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_diropres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_lookup,
++              .pc_decode = nfs3svc_decode_diropargs,
++              .pc_encode = nfs3svc_encode_diropres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_diropargs),
+               .pc_ressize = sizeof(struct nfsd3_diropres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+FH+pAT+pAT,
+       },
+       [NFS3PROC_ACCESS] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_access,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_accessargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_accessres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_access,
++              .pc_decode = nfs3svc_decode_accessargs,
++              .pc_encode = nfs3svc_encode_accessres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_accessargs),
+               .pc_ressize = sizeof(struct nfsd3_accessres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+pAT+1,
+       },
+       [NFS3PROC_READLINK] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_readlink,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_readlinkargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_readlinkres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_readlink,
++              .pc_decode = nfs3svc_decode_readlinkargs,
++              .pc_encode = nfs3svc_encode_readlinkres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_readlinkargs),
+               .pc_ressize = sizeof(struct nfsd3_readlinkres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
+       },
+       [NFS3PROC_READ] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_read,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_readargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_readres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_read,
++              .pc_decode = nfs3svc_decode_readargs,
++              .pc_encode = nfs3svc_encode_readres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_readargs),
+               .pc_ressize = sizeof(struct nfsd3_readres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
+       },
+       [NFS3PROC_WRITE] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_write,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_writeargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_writeres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_write,
++              .pc_decode = nfs3svc_decode_writeargs,
++              .pc_encode = nfs3svc_encode_writeres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_writeargs),
+               .pc_ressize = sizeof(struct nfsd3_writeres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+WC+4,
+       },
+       [NFS3PROC_CREATE] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_create,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_createargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_create,
++              .pc_decode = nfs3svc_decode_createargs,
++              .pc_encode = nfs3svc_encode_createres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_createargs),
+               .pc_ressize = sizeof(struct nfsd3_createres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+(1+FH+pAT)+WC,
+       },
+       [NFS3PROC_MKDIR] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_mkdir,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_mkdirargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_mkdir,
++              .pc_decode = nfs3svc_decode_mkdirargs,
++              .pc_encode = nfs3svc_encode_createres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_mkdirargs),
+               .pc_ressize = sizeof(struct nfsd3_createres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+(1+FH+pAT)+WC,
+       },
+       [NFS3PROC_SYMLINK] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_symlink,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_symlinkargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_symlink,
++              .pc_decode = nfs3svc_decode_symlinkargs,
++              .pc_encode = nfs3svc_encode_createres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_symlinkargs),
+               .pc_ressize = sizeof(struct nfsd3_createres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+(1+FH+pAT)+WC,
+       },
+       [NFS3PROC_MKNOD] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_mknod,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_mknodargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_mknod,
++              .pc_decode = nfs3svc_decode_mknodargs,
++              .pc_encode = nfs3svc_encode_createres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_mknodargs),
+               .pc_ressize = sizeof(struct nfsd3_createres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+(1+FH+pAT)+WC,
+       },
+       [NFS3PROC_REMOVE] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_remove,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_remove,
++              .pc_decode = nfs3svc_decode_diropargs,
++              .pc_encode = nfs3svc_encode_wccstatres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_diropargs),
+               .pc_ressize = sizeof(struct nfsd3_wccstatres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+WC,
+       },
+       [NFS3PROC_RMDIR] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_rmdir,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_rmdir,
++              .pc_decode = nfs3svc_decode_diropargs,
++              .pc_encode = nfs3svc_encode_wccstatres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_diropargs),
+               .pc_ressize = sizeof(struct nfsd3_wccstatres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+WC,
+       },
+       [NFS3PROC_RENAME] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_rename,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_renameargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_renameres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_rename,
++              .pc_decode = nfs3svc_decode_renameargs,
++              .pc_encode = nfs3svc_encode_renameres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_renameargs),
+               .pc_ressize = sizeof(struct nfsd3_renameres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+WC+WC,
+       },
+       [NFS3PROC_LINK] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_link,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_linkargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_linkres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
++              .pc_func = nfsd3_proc_link,
++              .pc_decode = nfs3svc_decode_linkargs,
++              .pc_encode = nfs3svc_encode_linkres,
++              .pc_release = nfs3svc_release_fhandle2,
+               .pc_argsize = sizeof(struct nfsd3_linkargs),
+               .pc_ressize = sizeof(struct nfsd3_linkres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+pAT+WC,
+       },
+       [NFS3PROC_READDIR] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_readdir,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_readdir,
++              .pc_decode = nfs3svc_decode_readdirargs,
++              .pc_encode = nfs3svc_encode_readdirres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_readdirargs),
+               .pc_ressize = sizeof(struct nfsd3_readdirres),
+               .pc_cachetype = RC_NOCACHE,
+       },
+       [NFS3PROC_READDIRPLUS] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_readdirplus,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirplusargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_readdirplus,
++              .pc_decode = nfs3svc_decode_readdirplusargs,
++              .pc_encode = nfs3svc_encode_readdirres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_readdirplusargs),
+               .pc_ressize = sizeof(struct nfsd3_readdirres),
+               .pc_cachetype = RC_NOCACHE,
+       },
+       [NFS3PROC_FSSTAT] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_fsstat,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_fsstatres,
++              .pc_func = nfsd3_proc_fsstat,
++              .pc_decode = nfs3svc_decode_fhandleargs,
++              .pc_encode = nfs3svc_encode_fsstatres,
+               .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+               .pc_ressize = sizeof(struct nfsd3_fsstatres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+pAT+2*6+1,
+       },
+       [NFS3PROC_FSINFO] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_fsinfo,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_fsinfores,
++              .pc_func = nfsd3_proc_fsinfo,
++              .pc_decode = nfs3svc_decode_fhandleargs,
++              .pc_encode = nfs3svc_encode_fsinfores,
+               .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+               .pc_ressize = sizeof(struct nfsd3_fsinfores),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+pAT+12,
+       },
+       [NFS3PROC_PATHCONF] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_pathconf,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_pathconfres,
++              .pc_func = nfsd3_proc_pathconf,
++              .pc_decode = nfs3svc_decode_fhandleargs,
++              .pc_encode = nfs3svc_encode_pathconfres,
+               .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+               .pc_ressize = sizeof(struct nfsd3_pathconfres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+pAT+6,
+       },
+       [NFS3PROC_COMMIT] = {
+-              .pc_func = (svc_procfunc) nfsd3_proc_commit,
+-              .pc_decode = (kxdrproc_t) nfs3svc_decode_commitargs,
+-              .pc_encode = (kxdrproc_t) nfs3svc_encode_commitres,
+-              .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
++              .pc_func = nfsd3_proc_commit,
++              .pc_decode = nfs3svc_decode_commitargs,
++              .pc_encode = nfs3svc_encode_commitres,
++              .pc_release = nfs3svc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd3_commitargs),
+               .pc_ressize = sizeof(struct nfsd3_commitres),
+               .pc_cachetype = RC_NOCACHE,
+diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
+index dba2ff8..9ac9eba 100644
+--- a/fs/nfsd/nfs3xdr.c
++++ b/fs/nfsd/nfs3xdr.c
+@@ -273,8 +273,10 @@ void fill_post_wcc(struct svc_fh *fhp)
+  * XDR decode functions
+  */
+ int
+-nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args)
++nfs3svc_decode_fhandle(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_fhandle *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -282,9 +284,10 @@ nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *a
+ }
+ int
+-nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_sattrargs *args)
++nfs3svc_decode_sattrargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_sattrargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -300,9 +303,10 @@ nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_diropargs *args)
++nfs3svc_decode_diropargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_diropargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->fh))
+        || !(p = decode_filename(p, &args->name, &args->len)))
+               return 0;
+@@ -311,9 +315,10 @@ nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_accessargs *args)
++nfs3svc_decode_accessargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_accessargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -323,9 +328,10 @@ nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readargs *args)
++nfs3svc_decode_readargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readargs *args = _args;
+       unsigned int len;
+       int v;
+       u32 max_blocksize = svc_max_payload(rqstp);
+@@ -353,9 +359,10 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_writeargs *args)
++nfs3svc_decode_writeargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_writeargs *args = _args;
+       unsigned int len, v, hdr, dlen;
+       u32 max_blocksize = svc_max_payload(rqstp);
+@@ -410,9 +417,11 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_createargs *args)
++nfs3svc_decode_createargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_createargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->fh))
+        || !(p = decode_filename(p, &args->name, &args->len)))
+               return 0;
+@@ -433,9 +442,10 @@ nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
+       return xdr_argsize_check(rqstp, p);
+ }
+ int
+-nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_createargs *args)
++nfs3svc_decode_mkdirargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_createargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->fh)) ||
+           !(p = decode_filename(p, &args->name, &args->len)))
+               return 0;
+@@ -445,9 +455,10 @@ nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_symlinkargs *args)
++nfs3svc_decode_symlinkargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_symlinkargs *args = _args;
+       unsigned int len, avail;
+       char *old, *new;
+       struct kvec *vec;
+@@ -495,9 +506,10 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_mknodargs *args)
++nfs3svc_decode_mknodargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_mknodargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->fh))
+        || !(p = decode_filename(p, &args->name, &args->len)))
+               return 0;
+@@ -517,9 +529,10 @@ nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_renameargs *args)
++nfs3svc_decode_renameargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_renameargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->ffh))
+        || !(p = decode_filename(p, &args->fname, &args->flen))
+        || !(p = decode_fh(p, &args->tfh))
+@@ -530,9 +543,11 @@ nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readlinkargs *args)
++nfs3svc_decode_readlinkargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readlinkargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -542,9 +557,10 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_linkargs *args)
++nfs3svc_decode_linkargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_linkargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->ffh))
+        || !(p = decode_fh(p, &args->tfh))
+        || !(p = decode_filename(p, &args->tname, &args->tlen)))
+@@ -554,9 +570,11 @@ nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readdirargs *args)
++nfs3svc_decode_readdirargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readdirargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -571,9 +589,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readdirargs *args)
++nfs3svc_decode_readdirplusargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readdirargs *args = _args;
+       int len;
+       u32 max_blocksize = svc_max_payload(rqstp);
+@@ -597,9 +616,10 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_commitargs *args)
++nfs3svc_decode_commitargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd3_commitargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -617,16 +637,17 @@ nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p,
+  * will work properly.
+  */
+ int
+-nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nfs3svc_encode_voidres(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_ressize_check(rqstp, p);
+ }
+ /* GETATTR */
+ int
+-nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_attrstat *resp)
++nfs3svc_encode_attrstat(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_attrstat *resp = _resp;
++
+       if (resp->status == 0) {
+               lease_get_mtime(d_inode(resp->fh.fh_dentry),
+                               &resp->stat.mtime);
+@@ -637,18 +658,20 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
+ /* SETATTR, REMOVE, RMDIR */
+ int
+-nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_attrstat *resp)
++nfs3svc_encode_wccstat(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_attrstat *resp = _resp;
++
+       p = encode_wcc_data(rqstp, p, &resp->fh);
+       return xdr_ressize_check(rqstp, p);
+ }
+ /* LOOKUP */
+ int
+-nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_diropres *resp)
++nfs3svc_encode_diropres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_diropres *resp = _resp;
++
+       if (resp->status == 0) {
+               p = encode_fh(p, &resp->fh);
+               p = encode_post_op_attr(rqstp, p, &resp->fh);
+@@ -659,9 +682,10 @@ nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
+ /* ACCESS */
+ int
+-nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_accessres *resp)
++nfs3svc_encode_accessres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_accessres *resp = _resp;
++
+       p = encode_post_op_attr(rqstp, p, &resp->fh);
+       if (resp->status == 0)
+               *p++ = htonl(resp->access);
+@@ -670,9 +694,11 @@ nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
+ /* READLINK */
+ int
+-nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readlinkres *resp)
++nfs3svc_encode_readlinkres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readlinkres *resp = _resp;
++
+       p = encode_post_op_attr(rqstp, p, &resp->fh);
+       if (resp->status == 0) {
+               *p++ = htonl(resp->len);
+@@ -691,9 +717,11 @@ nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
+ /* READ */
+ int
+-nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readres *resp)
++nfs3svc_encode_readres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readres *resp = _resp;
++
+       p = encode_post_op_attr(rqstp, p, &resp->fh);
+       if (resp->status == 0) {
+               *p++ = htonl(resp->count);
+@@ -715,9 +743,10 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
+ /* WRITE */
+ int
+-nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_writeres *resp)
++nfs3svc_encode_writeres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_writeres *resp = _resp;
+       struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+       p = encode_wcc_data(rqstp, p, &resp->fh);
+@@ -732,9 +761,10 @@ nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p,
+ /* CREATE, MKDIR, SYMLINK, MKNOD */
+ int
+-nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_diropres *resp)
++nfs3svc_encode_createres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_diropres *resp = _resp;
++
+       if (resp->status == 0) {
+               *p++ = xdr_one;
+               p = encode_fh(p, &resp->fh);
+@@ -746,9 +776,10 @@ nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p,
+ /* RENAME */
+ int
+-nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_renameres *resp)
++nfs3svc_encode_renameres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_renameres *resp = _resp;
++
+       p = encode_wcc_data(rqstp, p, &resp->ffh);
+       p = encode_wcc_data(rqstp, p, &resp->tfh);
+       return xdr_ressize_check(rqstp, p);
+@@ -756,9 +787,10 @@ nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p,
+ /* LINK */
+ int
+-nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_linkres *resp)
++nfs3svc_encode_linkres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_linkres *resp = _resp;
++
+       p = encode_post_op_attr(rqstp, p, &resp->fh);
+       p = encode_wcc_data(rqstp, p, &resp->tfh);
+       return xdr_ressize_check(rqstp, p);
+@@ -766,9 +798,11 @@ nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p,
+ /* READDIR */
+ int
+-nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_readdirres *resp)
++nfs3svc_encode_readdirres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_readdirres *resp = _resp;
++
+       p = encode_post_op_attr(rqstp, p, &resp->fh);
+       if (resp->status == 0) {
+@@ -1016,9 +1050,9 @@ nfs3svc_encode_entry_plus(void *cd, const char *name,
+ /* FSSTAT */
+ int
+-nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_fsstatres *resp)
++nfs3svc_encode_fsstatres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_fsstatres *resp = _resp;
+       struct kstatfs  *s = &resp->stats;
+       u64             bs = s->f_bsize;
+@@ -1038,9 +1072,10 @@ nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p,
+ /* FSINFO */
+ int
+-nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_fsinfores *resp)
++nfs3svc_encode_fsinfores(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_fsinfores *resp = _resp;
++
+       *p++ = xdr_zero;        /* no post_op_attr */
+       if (resp->status == 0) {
+@@ -1062,9 +1097,10 @@ nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p,
+ /* PATHCONF */
+ int
+-nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_pathconfres *resp)
++nfs3svc_encode_pathconfres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_pathconfres *resp = _resp;
++
+       *p++ = xdr_zero;        /* no post_op_attr */
+       if (resp->status == 0) {
+@@ -1081,9 +1117,10 @@ nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p,
+ /* COMMIT */
+ int
+-nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_commitres *resp)
++nfs3svc_encode_commitres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd3_commitres *resp = _resp;
+       struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+       p = encode_wcc_data(rqstp, p, &resp->fh);
+@@ -1099,17 +1136,19 @@ nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p,
+  * XDR release functions
+  */
+ int
+-nfs3svc_release_fhandle(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_attrstat *resp)
++nfs3svc_release_fhandle(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_attrstat *resp = _resp;
++
+       fh_put(&resp->fh);
+       return 1;
+ }
+ int
+-nfs3svc_release_fhandle2(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd3_fhandle_pair *resp)
++nfs3svc_release_fhandle2(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd3_fhandle_pair *resp = _resp;
++
+       fh_put(&resp->fh1);
+       fh_put(&resp->fh2);
+       return 1;
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 04c68d9..cc49866 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -470,8 +470,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr,
+ /*
+  * NB: Without this zero space reservation, callbacks over krb5p fail
+  */
+-static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                               void *__unused)
++static void nfs4_xdr_enc_cb_null(void *req, struct xdr_stream *xdr, void *__unused)
+ {
+       xdr_reserve_space(xdr, 0);
+ }
+@@ -479,9 +478,9 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+  */
+-static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                                 const struct nfsd4_callback *cb)
++static void nfs4_xdr_enc_cb_recall(void *req, struct xdr_stream *xdr, void *_cb)
+ {
++      const struct nfsd4_callback *cb = _cb;
+       const struct nfs4_delegation *dp = cb_to_delegation(cb);
+       struct nfs4_cb_compound_hdr hdr = {
+               .ident = cb->cb_clp->cl_cb_ident,
+@@ -504,8 +503,7 @@ static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+  * Protocol".
+  */
+-static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+-                              void *__unused)
++static int nfs4_xdr_dec_cb_null(void *req, struct xdr_stream *xdr, void *__unused)
+ {
+       return 0;
+ }
+@@ -513,10 +511,11 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+ /*
+  * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+  */
+-static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_cb_recall(void *rqstp,
+                                 struct xdr_stream *xdr,
+-                                struct nfsd4_callback *cb)
++                                void *_cb)
+ {
++      struct nfsd4_callback *cb = _cb;
+       struct nfs4_cb_compound_hdr hdr;
+       int status;
+@@ -586,10 +585,12 @@ static void encode_cb_layout4args(struct xdr_stream *xdr,
+       hdr->nops++;
+ }
+-static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
++static void nfs4_xdr_enc_cb_layout(void *_req,
+                                  struct xdr_stream *xdr,
+-                                 const struct nfsd4_callback *cb)
++                                 void *_cb)
+ {
++      struct rpc_rqst *req = _req;
++      const struct nfsd4_callback *cb = _cb;
+       const struct nfs4_layout_stateid *ls =
+               container_of(cb, struct nfs4_layout_stateid, ls_recall);
+       struct nfs4_cb_compound_hdr hdr = {
+@@ -603,10 +604,12 @@ static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
+       encode_cb_nops(&hdr);
+ }
+-static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
++static int nfs4_xdr_dec_cb_layout(void *_rqstp,
+                                 struct xdr_stream *xdr,
+-                                struct nfsd4_callback *cb)
++                                void *_cb)
+ {
++      struct rpc_rqst *rqstp = _rqstp;
++      struct nfsd4_callback *cb = _cb;
+       struct nfs4_cb_compound_hdr hdr;
+       int status;
+@@ -629,8 +632,8 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
+ #define PROC(proc, call, argtype, restype)                            \
+ [NFSPROC4_CLNT_##proc] = {                                            \
+       .p_proc    = NFSPROC4_CB_##call,                                \
+-      .p_encode  = (kxdreproc_t)nfs4_xdr_enc_##argtype,               \
+-      .p_decode  = (kxdrdproc_t)nfs4_xdr_dec_##restype,               \
++      .p_encode  = nfs4_xdr_enc_##argtype,                    \
++      .p_decode  = nfs4_xdr_dec_##restype,                    \
+       .p_arglen  = NFS4_enc_##argtype##_sz,                           \
+       .p_replen  = NFS4_dec_##restype##_sz,                           \
+       .p_statidx = NFSPROC4_CB_##call,                                \
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 1fb2227..150c145 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -358,8 +358,9 @@ copy_clientid(clientid_t *clid, struct nfsd4_session *session)
+ static __be32
+ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-         struct nfsd4_open *open)
++         void *_open)
+ {
++      struct nfsd4_open *open = _open;
+       __be32 status;
+       struct svc_fh *resfh = NULL;
+       struct net *net = SVC_NET(rqstp);
+@@ -496,8 +497,10 @@ static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_stat
+  */
+ static __be32
+ nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          struct svc_fh **getfh)
++          void *_getfh)
+ {
++      struct svc_fh **getfh = (struct svc_fh **)_getfh;
++
+       if (!cstate->current_fh.fh_dentry)
+               return nfserr_nofilehandle;
+@@ -507,8 +510,10 @@ nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          struct nfsd4_putfh *putfh)
++          void *_putfh)
+ {
++      struct nfsd4_putfh *putfh = _putfh;
++
+       fh_put(&cstate->current_fh);
+       cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
+       memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
+@@ -562,8 +567,10 @@ nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+  */
+ static __be32
+ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_access *access)
++           void *_access)
+ {
++      struct nfsd4_access *access = _access;
++
+       if (access->ac_req_access & ~NFS3_ACCESS_FULL)
+               return nfserr_inval;
+@@ -588,8 +595,10 @@ static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
+ static __be32
+ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_commit *commit)
++           void *_commit)
+ {
++      struct nfsd4_commit *commit = _commit;
++
+       gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
+       return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
+                            commit->co_count);
+@@ -597,8 +606,9 @@ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_create *create)
++           void *_create)
+ {
++      struct nfsd4_create *create = _create;
+       struct svc_fh resfh;
+       __be32 status;
+       dev_t rdev;
+@@ -684,8 +694,9 @@ out:
+ static __be32
+ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-            struct nfsd4_getattr *getattr)
++            void *_getattr)
+ {
++      struct nfsd4_getattr *getattr = _getattr;
+       __be32 status;
+       status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
+@@ -705,8 +716,9 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-         struct nfsd4_link *link)
++         void *_link)
+ {
++      struct nfsd4_link *link = _link;
+       __be32 status = nfserr_nofilehandle;
+       if (!cstate->save_fh.fh_dentry)
+@@ -744,8 +756,9 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_lookup *lookup)
++           void *_lookup)
+ {
++      struct nfsd4_lookup *lookup = _lookup;
+       return nfsd_lookup(rqstp, &cstate->current_fh,
+                          lookup->lo_name, lookup->lo_len,
+                          &cstate->current_fh);
+@@ -753,8 +766,9 @@ nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-         struct nfsd4_read *read)
++         void *_read)
+ {
++      struct nfsd4_read *read = _read;
+       __be32 status;
+       read->rd_filp = NULL;
+@@ -789,8 +803,9 @@ out:
+ static __be32
+ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-            struct nfsd4_readdir *readdir)
++            void *_readdir)
+ {
++      struct nfsd4_readdir *readdir = _readdir;
+       u64 cookie = readdir->rd_cookie;
+       static const nfs4_verifier zeroverf;
+@@ -814,8 +829,10 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-             struct nfsd4_readlink *readlink)
++             void *_readlink)
+ {
++      struct nfsd4_readlink *readlink = _readlink;
++
+       readlink->rl_rqstp = rqstp;
+       readlink->rl_fhp = &cstate->current_fh;
+       return nfs_ok;
+@@ -823,8 +840,9 @@ nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_remove *remove)
++           void *_remove)
+ {
++      struct nfsd4_remove *remove = _remove;
+       __be32 status;
+       if (opens_in_grace(SVC_NET(rqstp)))
+@@ -840,8 +858,9 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_rename *rename)
++           void *_rename)
+ {
++      struct nfsd4_rename *rename = _rename;
+       __be32 status = nfserr_nofilehandle;
+       if (!cstate->save_fh.fh_dentry)
+@@ -861,8 +880,9 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-            struct nfsd4_secinfo *secinfo)
++            void *_secinfo)
+ {
++      struct nfsd4_secinfo *secinfo = _secinfo;
+       struct svc_export *exp;
+       struct dentry *dentry;
+       __be32 err;
+@@ -890,8 +910,9 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-            struct nfsd4_secinfo_no_name *sin)
++            void *_sin)
+ {
++      struct nfsd4_secinfo_no_name *sin = _sin;
+       __be32 err;
+       switch (sin->sin_style) {
+@@ -913,8 +934,9 @@ nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstat
+ static __be32
+ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-            struct nfsd4_setattr *setattr)
++            void *_setattr)
+ {
++      struct nfsd4_setattr *setattr = _setattr;
+       __be32 status = nfs_ok;
+       int err;
+@@ -974,8 +996,9 @@ static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
+ static __be32
+ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          struct nfsd4_write *write)
++          void *_write)
+ {
++      struct nfsd4_write *write = _write;
+       stateid_t *stateid = &write->wr_stateid;
+       struct file *filp = NULL;
+       __be32 status = nfs_ok;
+@@ -1011,8 +1034,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-              struct nfsd4_clone *clone)
++              void *_clone)
+ {
++      struct nfsd4_clone *clone = _clone;
+       struct file *src, *dst;
+       __be32 status;
+@@ -1075,23 +1099,28 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-             struct nfsd4_fallocate *fallocate)
++             void *_fallocate)
+ {
++      struct nfsd4_fallocate *fallocate = _fallocate;
++
+       return nfsd4_fallocate(rqstp, cstate, fallocate, 0);
+ }
+ static __be32
+ nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-               struct nfsd4_fallocate *fallocate)
++               void *_fallocate)
+ {
++      struct nfsd4_fallocate *fallocate = _fallocate;
++
+       return nfsd4_fallocate(rqstp, cstate, fallocate,
+                              FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
+ }
+ static __be32
+ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-              struct nfsd4_seek *seek)
++              void *_seek)
+ {
++      struct nfsd4_seek *seek = (struct nfsd4_seek *)_seek;
+       int whence;
+       __be32 status;
+       struct file *file;
+@@ -1138,8 +1167,9 @@ out:
+  */
+ static __be32
+ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_verify *verify)
++           void *_verify)
+ {
++      struct nfsd4_verify *verify = _verify;
+       __be32 *buf, *p;
+       int count;
+       __be32 status;
+@@ -1196,8 +1226,9 @@ out_kfree:
+ static __be32
+ nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-            struct nfsd4_verify *verify)
++            void *_verify)
+ {
++      struct nfsd4_verify *verify = _verify;
+       __be32 status;
+       status = _nfsd4_verify(rqstp, cstate, verify);
+@@ -1206,8 +1237,9 @@ nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ static __be32
+ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-           struct nfsd4_verify *verify)
++           void *_verify)
+ {
++      struct nfsd4_verify *verify = _verify;
+       __be32 status;
+       status = _nfsd4_verify(rqstp, cstate, verify);
+@@ -1235,8 +1267,9 @@ nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
+ static __be32
+ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *cstate,
+-              struct nfsd4_getdeviceinfo *gdp)
++              void *_gdp)
+ {
++      struct nfsd4_getdeviceinfo *gdp = _gdp;
+       const struct nfsd4_layout_ops *ops;
+       struct nfsd4_deviceid_map *map;
+       struct svc_export *exp;
+@@ -1281,8 +1314,9 @@ out:
+ static __be32
+ nfsd4_layoutget(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *cstate,
+-              struct nfsd4_layoutget *lgp)
++              void *_lgp)
+ {
++      struct nfsd4_layoutget *lgp = _lgp;
+       struct svc_fh *current_fh = &cstate->current_fh;
+       const struct nfsd4_layout_ops *ops;
+       struct nfs4_layout_stateid *ls;
+@@ -1361,8 +1395,9 @@ out:
+ static __be32
+ nfsd4_layoutcommit(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *cstate,
+-              struct nfsd4_layoutcommit *lcp)
++              void *_lcp)
+ {
++      struct nfsd4_layoutcommit *lcp = _lcp;
+       const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
+       struct svc_fh *current_fh = &cstate->current_fh;
+       const struct nfsd4_layout_ops *ops;
+@@ -1425,8 +1460,9 @@ out:
+ static __be32
+ nfsd4_layoutreturn(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *cstate,
+-              struct nfsd4_layoutreturn *lrp)
++              void *_lrp)
+ {
++      struct nfsd4_layoutreturn *lrp = _lrp;
+       struct svc_fh *current_fh = &cstate->current_fh;
+       __be32 nfserr;
+@@ -1528,7 +1564,7 @@ struct nfsd4_operation {
+       nfsd4op_rsize op_rsize_bop;
+       stateid_getter op_get_currentstateid;
+       stateid_setter op_set_currentstateid;
+-};
++} __do_const;
+ static struct nfsd4_operation nfsd4_ops[];
+@@ -1636,10 +1672,10 @@ static void svcxdr_init_encode(struct svc_rqst *rqstp,
+  * COMPOUND call.
+  */
+ static __be32
+-nfsd4_proc_compound(struct svc_rqst *rqstp,
+-                  struct nfsd4_compoundargs *args,
+-                  struct nfsd4_compoundres *resp)
++nfsd4_proc_compound(struct svc_rqst *rqstp, void *_args, void *_resp)
+ {
++      struct nfsd4_compoundargs *args = _args;
++      struct nfsd4_compoundres *resp = _resp;
+       struct nfsd4_op *op;
+       struct nfsd4_operation *opdesc;
+       struct nfsd4_compound_state *cstate = &resp->cstate;
+@@ -1998,338 +2034,338 @@ static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_
+ static struct nfsd4_operation nfsd4_ops[] = {
+       [OP_ACCESS] = {
+-              .op_func = (nfsd4op_func)nfsd4_access,
++              .op_func = nfsd4_access,
+               .op_name = "OP_ACCESS",
+       },
+       [OP_CLOSE] = {
+-              .op_func = (nfsd4op_func)nfsd4_close,
++              .op_func = nfsd4_close,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_CLOSE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
+-              .op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
++              .op_rsize_bop = nfsd4_status_stateid_rsize,
++              .op_get_currentstateid = nfsd4_get_closestateid,
++              .op_set_currentstateid = nfsd4_set_closestateid,
+       },
+       [OP_COMMIT] = {
+-              .op_func = (nfsd4op_func)nfsd4_commit,
++              .op_func = nfsd4_commit,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_COMMIT",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_commit_rsize,
++              .op_rsize_bop = nfsd4_commit_rsize,
+       },
+       [OP_CREATE] = {
+-              .op_func = (nfsd4op_func)nfsd4_create,
++              .op_func = nfsd4_create,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
+               .op_name = "OP_CREATE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
++              .op_rsize_bop = nfsd4_create_rsize,
+       },
+       [OP_DELEGRETURN] = {
+-              .op_func = (nfsd4op_func)nfsd4_delegreturn,
++              .op_func = nfsd4_delegreturn,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_DELEGRETURN",
+               .op_rsize_bop = nfsd4_only_status_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_delegreturnstateid,
++              .op_get_currentstateid = nfsd4_get_delegreturnstateid,
+       },
+       [OP_GETATTR] = {
+-              .op_func = (nfsd4op_func)nfsd4_getattr,
++              .op_func = nfsd4_getattr,
+               .op_flags = ALLOWED_ON_ABSENT_FS,
+               .op_rsize_bop = nfsd4_getattr_rsize,
+               .op_name = "OP_GETATTR",
+       },
+       [OP_GETFH] = {
+-              .op_func = (nfsd4op_func)nfsd4_getfh,
++              .op_func = nfsd4_getfh,
+               .op_name = "OP_GETFH",
+       },
+       [OP_LINK] = {
+-              .op_func = (nfsd4op_func)nfsd4_link,
++              .op_func = nfsd4_link,
+               .op_flags = ALLOWED_ON_ABSENT_FS | OP_MODIFIES_SOMETHING
+                               | OP_CACHEME,
+               .op_name = "OP_LINK",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_link_rsize,
++              .op_rsize_bop = nfsd4_link_rsize,
+       },
+       [OP_LOCK] = {
+-              .op_func = (nfsd4op_func)nfsd4_lock,
++              .op_func = nfsd4_lock,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_LOCK",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
+-              .op_set_currentstateid = (stateid_setter)nfsd4_set_lockstateid,
++              .op_rsize_bop = nfsd4_lock_rsize,
++              .op_set_currentstateid = nfsd4_set_lockstateid,
+       },
+       [OP_LOCKT] = {
+-              .op_func = (nfsd4op_func)nfsd4_lockt,
++              .op_func = nfsd4_lockt,
+               .op_name = "OP_LOCKT",
+       },
+       [OP_LOCKU] = {
+-              .op_func = (nfsd4op_func)nfsd4_locku,
++              .op_func = nfsd4_locku,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_LOCKU",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_lockustateid,
++              .op_rsize_bop = nfsd4_status_stateid_rsize,
++              .op_get_currentstateid = nfsd4_get_lockustateid,
+       },
+       [OP_LOOKUP] = {
+-              .op_func = (nfsd4op_func)nfsd4_lookup,
++              .op_func = nfsd4_lookup,
+               .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
+               .op_name = "OP_LOOKUP",
+       },
+       [OP_LOOKUPP] = {
+-              .op_func = (nfsd4op_func)nfsd4_lookupp,
++              .op_func = nfsd4_lookupp,
+               .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
+               .op_name = "OP_LOOKUPP",
+       },
+       [OP_NVERIFY] = {
+-              .op_func = (nfsd4op_func)nfsd4_nverify,
++              .op_func = nfsd4_nverify,
+               .op_name = "OP_NVERIFY",
+       },
+       [OP_OPEN] = {
+-              .op_func = (nfsd4op_func)nfsd4_open,
++              .op_func = nfsd4_open,
+               .op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_OPEN",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
+-              .op_set_currentstateid = (stateid_setter)nfsd4_set_openstateid,
++              .op_rsize_bop = nfsd4_open_rsize,
++              .op_set_currentstateid = nfsd4_set_openstateid,
+       },
+       [OP_OPEN_CONFIRM] = {
+-              .op_func = (nfsd4op_func)nfsd4_open_confirm,
++              .op_func = nfsd4_open_confirm,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_OPEN_CONFIRM",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
++              .op_rsize_bop = nfsd4_status_stateid_rsize,
+       },
+       [OP_OPEN_DOWNGRADE] = {
+-              .op_func = (nfsd4op_func)nfsd4_open_downgrade,
++              .op_func = nfsd4_open_downgrade,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_OPEN_DOWNGRADE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_opendowngradestateid,
+-              .op_set_currentstateid = (stateid_setter)nfsd4_set_opendowngradestateid,
++              .op_rsize_bop = nfsd4_status_stateid_rsize,
++              .op_get_currentstateid = nfsd4_get_opendowngradestateid,
++              .op_set_currentstateid = nfsd4_set_opendowngradestateid,
+       },
+       [OP_PUTFH] = {
+-              .op_func = (nfsd4op_func)nfsd4_putfh,
++              .op_func = nfsd4_putfh,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
+               .op_name = "OP_PUTFH",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_PUTPUBFH] = {
+-              .op_func = (nfsd4op_func)nfsd4_putrootfh,
++              .op_func = nfsd4_putrootfh,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
+               .op_name = "OP_PUTPUBFH",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_PUTROOTFH] = {
+-              .op_func = (nfsd4op_func)nfsd4_putrootfh,
++              .op_func = nfsd4_putrootfh,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_IS_PUTFH_LIKE | OP_CLEAR_STATEID,
+               .op_name = "OP_PUTROOTFH",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_READ] = {
+-              .op_func = (nfsd4op_func)nfsd4_read,
++              .op_func = nfsd4_read,
+               .op_name = "OP_READ",
+               .op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_readstateid,
++              .op_get_currentstateid = nfsd4_get_readstateid,
+       },
+       [OP_READDIR] = {
+-              .op_func = (nfsd4op_func)nfsd4_readdir,
++              .op_func = nfsd4_readdir,
+               .op_name = "OP_READDIR",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_readdir_rsize,
++              .op_rsize_bop = nfsd4_readdir_rsize,
+       },
+       [OP_READLINK] = {
+-              .op_func = (nfsd4op_func)nfsd4_readlink,
++              .op_func = nfsd4_readlink,
+               .op_name = "OP_READLINK",
+       },
+       [OP_REMOVE] = {
+-              .op_func = (nfsd4op_func)nfsd4_remove,
++              .op_func = nfsd4_remove,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_REMOVE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_remove_rsize,
++              .op_rsize_bop = nfsd4_remove_rsize,
+       },
+       [OP_RENAME] = {
+-              .op_func = (nfsd4op_func)nfsd4_rename,
++              .op_func = nfsd4_rename,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_RENAME",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_rename_rsize,
++              .op_rsize_bop = nfsd4_rename_rsize,
+       },
+       [OP_RENEW] = {
+-              .op_func = (nfsd4op_func)nfsd4_renew,
++              .op_func = nfsd4_renew,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_RENEW",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_RESTOREFH] = {
+-              .op_func = (nfsd4op_func)nfsd4_restorefh,
++              .op_func = nfsd4_restorefh,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_RESTOREFH",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_SAVEFH] = {
+-              .op_func = (nfsd4op_func)nfsd4_savefh,
++              .op_func = nfsd4_savefh,
+               .op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_SAVEFH",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_SECINFO] = {
+-              .op_func = (nfsd4op_func)nfsd4_secinfo,
++              .op_func = nfsd4_secinfo,
+               .op_flags = OP_HANDLES_WRONGSEC,
+               .op_name = "OP_SECINFO",
+       },
+       [OP_SETATTR] = {
+-              .op_func = (nfsd4op_func)nfsd4_setattr,
++              .op_func = nfsd4_setattr,
+               .op_name = "OP_SETATTR",
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_setattrstateid,
++              .op_rsize_bop = nfsd4_setattr_rsize,
++              .op_get_currentstateid = nfsd4_get_setattrstateid,
+       },
+       [OP_SETCLIENTID] = {
+-              .op_func = (nfsd4op_func)nfsd4_setclientid,
++              .op_func = nfsd4_setclientid,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_SETCLIENTID",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_setclientid_rsize,
++              .op_rsize_bop = nfsd4_setclientid_rsize,
+       },
+       [OP_SETCLIENTID_CONFIRM] = {
+-              .op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
++              .op_func = nfsd4_setclientid_confirm,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_SETCLIENTID_CONFIRM",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_VERIFY] = {
+-              .op_func = (nfsd4op_func)nfsd4_verify,
++              .op_func = nfsd4_verify,
+               .op_name = "OP_VERIFY",
+       },
+       [OP_WRITE] = {
+-              .op_func = (nfsd4op_func)nfsd4_write,
++              .op_func = nfsd4_write,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_WRITE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_writestateid,
++              .op_rsize_bop = nfsd4_write_rsize,
++              .op_get_currentstateid = nfsd4_get_writestateid,
+       },
+       [OP_RELEASE_LOCKOWNER] = {
+-              .op_func = (nfsd4op_func)nfsd4_release_lockowner,
++              .op_func = nfsd4_release_lockowner,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_RELEASE_LOCKOWNER",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       /* NFSv4.1 operations */
+       [OP_EXCHANGE_ID] = {
+-              .op_func = (nfsd4op_func)nfsd4_exchange_id,
++              .op_func = nfsd4_exchange_id,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_EXCHANGE_ID",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
++              .op_rsize_bop = nfsd4_exchange_id_rsize,
+       },
+       [OP_BACKCHANNEL_CTL] = {
+-              .op_func = (nfsd4op_func)nfsd4_backchannel_ctl,
++              .op_func = nfsd4_backchannel_ctl,
+               .op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_BACKCHANNEL_CTL",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_BIND_CONN_TO_SESSION] = {
+-              .op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
++              .op_func = nfsd4_bind_conn_to_session,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_BIND_CONN_TO_SESSION",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_bind_conn_to_session_rsize,
++              .op_rsize_bop = nfsd4_bind_conn_to_session_rsize,
+       },
+       [OP_CREATE_SESSION] = {
+-              .op_func = (nfsd4op_func)nfsd4_create_session,
++              .op_func = nfsd4_create_session,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_CREATE_SESSION",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_session_rsize,
++              .op_rsize_bop = nfsd4_create_session_rsize,
+       },
+       [OP_DESTROY_SESSION] = {
+-              .op_func = (nfsd4op_func)nfsd4_destroy_session,
++              .op_func = nfsd4_destroy_session,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_DESTROY_SESSION",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_SEQUENCE] = {
+-              .op_func = (nfsd4op_func)nfsd4_sequence,
++              .op_func = nfsd4_sequence,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+               .op_name = "OP_SEQUENCE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
++              .op_rsize_bop = nfsd4_sequence_rsize,
+       },
+       [OP_DESTROY_CLIENTID] = {
+-              .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
++              .op_func = nfsd4_destroy_clientid,
+               .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
+                               | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_DESTROY_CLIENTID",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_RECLAIM_COMPLETE] = {
+-              .op_func = (nfsd4op_func)nfsd4_reclaim_complete,
++              .op_func = nfsd4_reclaim_complete,
+               .op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_RECLAIM_COMPLETE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_SECINFO_NO_NAME] = {
+-              .op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
++              .op_func = nfsd4_secinfo_no_name,
+               .op_flags = OP_HANDLES_WRONGSEC,
+               .op_name = "OP_SECINFO_NO_NAME",
+       },
+       [OP_TEST_STATEID] = {
+-              .op_func = (nfsd4op_func)nfsd4_test_stateid,
++              .op_func = nfsd4_test_stateid,
+               .op_flags = ALLOWED_WITHOUT_FH,
+               .op_name = "OP_TEST_STATEID",
+       },
+       [OP_FREE_STATEID] = {
+-              .op_func = (nfsd4op_func)nfsd4_free_stateid,
++              .op_func = nfsd4_free_stateid,
+               .op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
+               .op_name = "OP_FREE_STATEID",
+-              .op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_get_currentstateid = nfsd4_get_freestateid,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+ #ifdef CONFIG_NFSD_PNFS
+       [OP_GETDEVICEINFO] = {
+-              .op_func = (nfsd4op_func)nfsd4_getdeviceinfo,
++              .op_func = nfsd4_getdeviceinfo,
+               .op_flags = ALLOWED_WITHOUT_FH,
+               .op_name = "OP_GETDEVICEINFO",
+       },
+       [OP_LAYOUTGET] = {
+-              .op_func = (nfsd4op_func)nfsd4_layoutget,
++              .op_func = nfsd4_layoutget,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_LAYOUTGET",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize,
++              .op_rsize_bop = nfsd4_layoutget_rsize,
+       },
+       [OP_LAYOUTCOMMIT] = {
+-              .op_func = (nfsd4op_func)nfsd4_layoutcommit,
++              .op_func = nfsd4_layoutcommit,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_LAYOUTCOMMIT",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize,
++              .op_rsize_bop = nfsd4_layoutcommit_rsize,
+       },
+       [OP_LAYOUTRETURN] = {
+-              .op_func = (nfsd4op_func)nfsd4_layoutreturn,
++              .op_func = nfsd4_layoutreturn,
+               .op_flags = OP_MODIFIES_SOMETHING,
+               .op_name = "OP_LAYOUTRETURN",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize,
++              .op_rsize_bop = nfsd4_layoutreturn_rsize,
+       },
+ #endif /* CONFIG_NFSD_PNFS */
+       /* NFSv4.2 operations */
+       [OP_ALLOCATE] = {
+-              .op_func = (nfsd4op_func)nfsd4_allocate,
++              .op_func = nfsd4_allocate,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_ALLOCATE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_DEALLOCATE] = {
+-              .op_func = (nfsd4op_func)nfsd4_deallocate,
++              .op_func = nfsd4_deallocate,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_DEALLOCATE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_CLONE] = {
+-              .op_func = (nfsd4op_func)nfsd4_clone,
++              .op_func = nfsd4_clone,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_CLONE",
+-              .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
++              .op_rsize_bop = nfsd4_only_status_rsize,
+       },
+       [OP_SEEK] = {
+-              .op_func = (nfsd4op_func)nfsd4_seek,
++              .op_func = nfsd4_seek,
+               .op_name = "OP_SEEK",
+       },
+ };
+@@ -2406,17 +2442,17 @@ struct nfsd4_voidargs { int dummy; };
+ static struct svc_procedure           nfsd_procedures4[2] = {
+       [NFSPROC4_NULL] = {
+-              .pc_func = (svc_procfunc) nfsd4_proc_null,
+-              .pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
++              .pc_func = nfsd4_proc_null,
++              .pc_encode = nfs4svc_encode_voidres,
+               .pc_argsize = sizeof(struct nfsd4_voidargs),
+               .pc_ressize = sizeof(struct nfsd4_voidres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = 1,
+       },
+       [NFSPROC4_COMPOUND] = {
+-              .pc_func = (svc_procfunc) nfsd4_proc_compound,
+-              .pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
+-              .pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
++              .pc_func = nfsd4_proc_compound,
++              .pc_decode = nfs4svc_decode_compoundargs,
++              .pc_encode = nfs4svc_encode_compoundres,
+               .pc_argsize = sizeof(struct nfsd4_compoundargs),
+               .pc_ressize = sizeof(struct nfsd4_compoundres),
+               .pc_release = nfsd4_release_compoundargs,
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index a204d7e..f97b734 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2341,8 +2341,9 @@ static bool client_has_state(struct nfs4_client *clp)
+ __be32
+ nfsd4_exchange_id(struct svc_rqst *rqstp,
+                 struct nfsd4_compound_state *cstate,
+-                struct nfsd4_exchange_id *exid)
++                void *_exid)
+ {
++      struct nfsd4_exchange_id *exid = _exid;
+       struct nfs4_client *conf, *new;
+       struct nfs4_client *unconf = NULL;
+       __be32 status;
+@@ -2636,8 +2637,9 @@ static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
+ __be32
+ nfsd4_create_session(struct svc_rqst *rqstp,
+                    struct nfsd4_compound_state *cstate,
+-                   struct nfsd4_create_session *cr_ses)
++                   void *_cr_ses)
+ {
++      struct nfsd4_create_session *cr_ses = _cr_ses;
+       struct sockaddr *sa = svc_addr(rqstp);
+       struct nfs4_client *conf, *unconf;
+       struct nfs4_client *old = NULL;
+@@ -2761,8 +2763,9 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
+       return nfserr_inval;
+ }
+-__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
++__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, void *_bc)
+ {
++      struct nfsd4_backchannel_ctl *bc = _bc;
+       struct nfsd4_session *session = cstate->session;
+       struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+       __be32 status;
+@@ -2782,8 +2785,9 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state
+ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
+                    struct nfsd4_compound_state *cstate,
+-                   struct nfsd4_bind_conn_to_session *bcts)
++                   void *_bcts)
+ {
++      struct nfsd4_bind_conn_to_session *bcts = _bcts;
+       __be32 status;
+       struct nfsd4_conn *conn;
+       struct nfsd4_session *session;
+@@ -2825,8 +2829,9 @@ static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4
+ __be32
+ nfsd4_destroy_session(struct svc_rqst *r,
+                     struct nfsd4_compound_state *cstate,
+-                    struct nfsd4_destroy_session *sessionid)
++                    void *_sessionid)
+ {
++      struct nfsd4_destroy_session *sessionid = _sessionid;
+       struct nfsd4_session *ses;
+       __be32 status;
+       int ref_held_by_me = 0;
+@@ -2922,8 +2927,9 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
+ __be32
+ nfsd4_sequence(struct svc_rqst *rqstp,
+              struct nfsd4_compound_state *cstate,
+-             struct nfsd4_sequence *seq)
++             void *_seq)
+ {
++      struct nfsd4_sequence *seq = _seq;
+       struct nfsd4_compoundres *resp = rqstp->rq_resp;
+       struct xdr_stream *xdr = &resp->xdr;
+       struct nfsd4_session *session;
+@@ -3057,8 +3063,9 @@ nfsd4_sequence_done(struct nfsd4_compoundres *resp)
+ }
+ __be32
+-nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
++nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, void *_dc)
+ {
++      struct nfsd4_destroy_clientid *dc = _dc;
+       struct nfs4_client *conf, *unconf;
+       struct nfs4_client *clp = NULL;
+       __be32 status = 0;
+@@ -3098,8 +3105,9 @@ out:
+ }
+ __be32
+-nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
++nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, void *_rc)
+ {
++      struct nfsd4_reclaim_complete *rc = _rc;
+       __be32 status = 0;
+       if (rc->rca_one_fs) {
+@@ -3136,8 +3144,9 @@ out:
+ __be32
+ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-                struct nfsd4_setclientid *setclid)
++                void *_setclid)
+ {
++      struct nfsd4_setclientid *setclid = _setclid;
+       struct xdr_netobj       clname = setclid->se_name;
+       nfs4_verifier           clverifier = setclid->se_verf;
+       struct nfs4_client      *conf, *new;
+@@ -3195,8 +3204,9 @@ out:
+ __be32
+ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
+                        struct nfsd4_compound_state *cstate,
+-                       struct nfsd4_setclientid_confirm *setclientid_confirm)
++                       void *_setclientid_confirm)
+ {
++      struct nfsd4_setclientid_confirm *setclientid_confirm = _setclientid_confirm;
+       struct nfs4_client *conf, *unconf;
+       struct nfs4_client *old = NULL;
+       nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
+@@ -4440,8 +4450,9 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
+ __be32
+ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          clientid_t *clid)
++          void *_clid)
+ {
++      clientid_t *clid = _clid;
+       struct nfs4_client *clp;
+       __be32 status;
+       struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+@@ -4891,8 +4902,9 @@ out:
+  */
+ __be32
+ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-                 struct nfsd4_test_stateid *test_stateid)
++                 void *_test_stateid)
+ {
++      struct nfsd4_test_stateid *test_stateid = _test_stateid;
+       struct nfsd4_test_stateid_id *stateid;
+       struct nfs4_client *cl = cstate->session->se_client;
+@@ -4931,8 +4943,9 @@ out:
+ __be32
+ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-                 struct nfsd4_free_stateid *free_stateid)
++                 void *_free_stateid)
+ {
++      struct nfsd4_free_stateid *free_stateid = _free_stateid;
+       stateid_t *stateid = &free_stateid->fr_stateid;
+       struct nfs4_stid *s;
+       struct nfs4_delegation *dp;
+@@ -5060,8 +5073,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+ __be32
+ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-                 struct nfsd4_open_confirm *oc)
++                 void *_oc)
+ {
++      struct nfsd4_open_confirm *oc = _oc;
+       __be32 status;
+       struct nfs4_openowner *oo;
+       struct nfs4_ol_stateid *stp;
+@@ -5129,8 +5143,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
+ __be32
+ nfsd4_open_downgrade(struct svc_rqst *rqstp,
+                    struct nfsd4_compound_state *cstate,
+-                   struct nfsd4_open_downgrade *od)
++                   void *_od)
+ {
++      struct nfsd4_open_downgrade *od = _od;
+       __be32 status;
+       struct nfs4_ol_stateid *stp;
+       struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+@@ -5198,8 +5213,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+  */
+ __be32
+ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          struct nfsd4_close *close)
++          void *_close)
+ {
++      struct nfsd4_close *close = _close;
+       __be32 status;
+       struct nfs4_ol_stateid *stp;
+       struct net *net = SVC_NET(rqstp);
+@@ -5228,8 +5244,9 @@ out:
+ __be32
+ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-                struct nfsd4_delegreturn *dr)
++                void *_dr)
+ {
++      struct nfsd4_delegreturn *dr = _dr;
+       struct nfs4_delegation *dp;
+       stateid_t *stateid = &dr->dr_stateid;
+       struct nfs4_stid *s;
+@@ -5580,8 +5597,9 @@ out:
+  */
+ __be32
+ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-         struct nfsd4_lock *lock)
++         void *_lock)
+ {
++      struct nfsd4_lock *lock = _lock;
+       struct nfs4_openowner *open_sop = NULL;
+       struct nfs4_lockowner *lock_sop = NULL;
+       struct nfs4_ol_stateid *lock_stp = NULL;
+@@ -5782,8 +5800,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
+  */
+ __be32
+ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          struct nfsd4_lockt *lockt)
++          void *_lockt)
+ {
++      struct nfsd4_lockt *lockt = _lockt;
+       struct file_lock *file_lock = NULL;
+       struct nfs4_lockowner *lo = NULL;
+       __be32 status;
+@@ -5855,8 +5874,9 @@ out:
+ __be32
+ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+-          struct nfsd4_locku *locku)
++          void *_locku)
+ {
++      struct nfsd4_locku *locku = _locku;
+       struct nfs4_ol_stateid *stp;
+       struct file *filp = NULL;
+       struct file_lock *file_lock = NULL;
+@@ -5962,8 +5982,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ __be32
+ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+                       struct nfsd4_compound_state *cstate,
+-                      struct nfsd4_release_lockowner *rlockowner)
++                      void *_rlockowner)
+ {
++      struct nfsd4_release_lockowner *rlockowner = _rlockowner;
+       clientid_t *clid = &rlockowner->rl_clientid;
+       struct nfs4_stateowner *sop;
+       struct nfs4_lockowner *lo = NULL;
+@@ -6922,26 +6943,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
+  * functions to set current state id
+  */
+ void
+-nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
++nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, void *_odp)
+ {
++      struct nfsd4_open_downgrade *odp = _odp;
++
+       put_stateid(cstate, &odp->od_stateid);
+ }
+ void
+-nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
++nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, void *_open)
+ {
++      struct nfsd4_open *open = _open;
++
+       put_stateid(cstate, &open->op_stateid);
+ }
+ void
+-nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
++nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, void *_close)
+ {
++      struct nfsd4_close *close = _close;
++
+       put_stateid(cstate, &close->cl_stateid);
+ }
+ void
+-nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
++nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, void *_lock)
+ {
++      struct nfsd4_lock *lock = _lock;
++
+       put_stateid(cstate, &lock->lk_resp_stateid);
+ }
+@@ -6950,49 +6979,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
+  */
+ void
+-nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
++nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, void *_odp)
+ {
++      struct nfsd4_open_downgrade *odp = _odp;
++
+       get_stateid(cstate, &odp->od_stateid);
+ }
+ void
+-nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
++nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, void *_drp)
+ {
++      struct nfsd4_delegreturn *drp = _drp;
++
+       get_stateid(cstate, &drp->dr_stateid);
+ }
+ void
+-nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
++nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, void *_fsp)
+ {
++      struct nfsd4_free_stateid *fsp = _fsp;
++
+       get_stateid(cstate, &fsp->fr_stateid);
+ }
+ void
+-nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
++nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, void *_setattr)
+ {
++      struct nfsd4_setattr *setattr = _setattr;
++
+       get_stateid(cstate, &setattr->sa_stateid);
+ }
+ void
+-nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
++nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, void *_close)
+ {
++      struct nfsd4_close *close = _close;
++
+       get_stateid(cstate, &close->cl_stateid);
+ }
+ void
+-nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
++nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, void *_locku)
+ {
++      struct nfsd4_locku *locku = _locku;
++
+       get_stateid(cstate, &locku->lu_stateid);
+ }
+ void
+-nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
++nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, void *_read)
+ {
++      struct nfsd4_read *read = _read;
++
+       get_stateid(cstate, &read->rd_stateid);
+ }
+ void
+-nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
++nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, void *_write)
+ {
++      struct nfsd4_write *write = _write;
++
+       get_stateid(cstate, &write->wr_stateid);
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 0aa0236..6381bd7 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -447,8 +447,9 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ }
+ static __be32
+-nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid)
++nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, void *_sid)
+ {
++      stateid_t *sid = _sid;
+       DECODE_HEAD;
+       READ_BUF(sizeof(stateid_t));
+@@ -459,8 +460,9 @@ nfsd4_decode_stateid(struct nfsd4_compoundargs *argp, stateid_t *sid)
+ }
+ static __be32
+-nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access)
++nfsd4_decode_access(struct nfsd4_compoundargs *argp, void *_access)
+ {
++      struct nfsd4_access *access = _access;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -469,8 +471,9 @@ nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access
+       DECODE_TAIL;
+ }
+-static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
++static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, void *_cbs)
+ {
++      struct nfsd4_cb_sec *cbs = _cbs;
+       DECODE_HEAD;
+       u32 dummy, uid, gid;
+       char *machine_name;
+@@ -549,8 +552,9 @@ static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_
+       DECODE_TAIL;
+ }
+-static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
++static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, void *_bc)
+ {
++      struct nfsd4_backchannel_ctl *bc = _bc;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -560,8 +564,9 @@ static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, stru
+       DECODE_TAIL;
+ }
+-static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
++static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, void *_bcts)
+ {
++      struct nfsd4_bind_conn_to_session *bcts = _bcts;
+       DECODE_HEAD;
+       READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
+@@ -573,8 +578,9 @@ static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp,
+ }
+ static __be32
+-nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
++nfsd4_decode_close(struct nfsd4_compoundargs *argp, void *_close)
+ {
++      struct nfsd4_close *close = _close;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -586,8 +592,9 @@ nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
+ static __be32
+-nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
++nfsd4_decode_commit(struct nfsd4_compoundargs *argp, void *_commit)
+ {
++      struct nfsd4_commit *commit = _commit;
+       DECODE_HEAD;
+       READ_BUF(12);
+@@ -598,8 +605,9 @@ nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit
+ }
+ static __be32
+-nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
++nfsd4_decode_create(struct nfsd4_compoundargs *argp, void *_create)
+ {
++      struct nfsd4_create *create = _create;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -642,20 +650,25 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
+ }
+ static inline __be32
+-nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
++nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, void *_dr)
+ {
++      struct nfsd4_delegreturn *dr = _dr;
++
+       return nfsd4_decode_stateid(argp, &dr->dr_stateid);
+ }
+ static inline __be32
+-nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
++nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, void *_getattr)
+ {
++      struct nfsd4_getattr *getattr = _getattr;
++
+       return nfsd4_decode_bitmap(argp, getattr->ga_bmval);
+ }
+ static __be32
+-nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
++nfsd4_decode_link(struct nfsd4_compoundargs *argp, void *_link)
+ {
++      struct nfsd4_link *link = _link;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -669,8 +682,9 @@ nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
+ }
+ static __be32
+-nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
++nfsd4_decode_lock(struct nfsd4_compoundargs *argp, void *_lock)
+ {
++      struct nfsd4_lock *lock = _lock;
+       DECODE_HEAD;
+       /*
+@@ -709,8 +723,9 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
+ }
+ static __be32
+-nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
++nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, void *_lockt)
+ {
++      struct nfsd4_lockt *lockt = _lockt;
+       DECODE_HEAD;
+                       
+       READ_BUF(32);
+@@ -728,8 +743,9 @@ nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
+ }
+ static __be32
+-nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
++nfsd4_decode_locku(struct nfsd4_compoundargs *argp, void *_locku)
+ {
++      struct nfsd4_locku *locku = _locku;
+       DECODE_HEAD;
+       READ_BUF(8);
+@@ -748,8 +764,9 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
+ }
+ static __be32
+-nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
++nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, void *_lookup)
+ {
++      struct nfsd4_lookup *lookup = _lookup;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -847,8 +864,9 @@ xdr_error:
+ }
+ static __be32
+-nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
++nfsd4_decode_open(struct nfsd4_compoundargs *argp, void *_open)
+ {
++      struct nfsd4_open *open = _open;
+       DECODE_HEAD;
+       u32 dummy;
+@@ -960,8 +978,9 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+ }
+ static __be32
+-nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
++nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, void *_open_conf)
+ {
++      struct nfsd4_open_confirm *open_conf = _open_conf;
+       DECODE_HEAD;
+       if (argp->minorversion >= 1)
+@@ -977,8 +996,9 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con
+ }
+ static __be32
+-nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
++nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, void *_open_down)
+ {
++      struct nfsd4_open_downgrade *open_down = _open_down;
+       DECODE_HEAD;
+                   
+       status = nfsd4_decode_stateid(argp, &open_down->od_stateid);
+@@ -997,8 +1017,9 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
+ }
+ static __be32
+-nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
++nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, void *_putfh)
+ {
++      struct nfsd4_putfh *putfh = _putfh;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -1020,8 +1041,9 @@ nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
+ }
+ static __be32
+-nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
++nfsd4_decode_read(struct nfsd4_compoundargs *argp, void *_read)
+ {
++      struct nfsd4_read *read = _read;
+       DECODE_HEAD;
+       status = nfsd4_decode_stateid(argp, &read->rd_stateid);
+@@ -1035,8 +1057,9 @@ nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
+ }
+ static __be32
+-nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
++nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, void *_readdir)
+ {
++      struct nfsd4_readdir *readdir = _readdir;
+       DECODE_HEAD;
+       READ_BUF(24);
+@@ -1051,8 +1074,9 @@ nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *read
+ }
+ static __be32
+-nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
++nfsd4_decode_remove(struct nfsd4_compoundargs *argp, void *_remove)
+ {
++      struct nfsd4_remove *remove = _remove;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -1066,8 +1090,9 @@ nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove
+ }
+ static __be32
+-nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
++nfsd4_decode_rename(struct nfsd4_compoundargs *argp, void *_rename)
+ {
++      struct nfsd4_rename *rename = _rename;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -1087,7 +1112,7 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
+ }
+ static __be32
+-nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
++nfsd4_decode_renew(struct nfsd4_compoundargs *argp, void *clientid)
+ {
+       DECODE_HEAD;
+@@ -1102,8 +1127,9 @@ nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
+ static __be32
+ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
+-                   struct nfsd4_secinfo *secinfo)
++                   void *_secinfo)
+ {
++      struct nfsd4_secinfo *secinfo = _secinfo;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -1118,8 +1144,9 @@ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+-                   struct nfsd4_secinfo_no_name *sin)
++                   void *_sin)
+ {
++      struct nfsd4_secinfo_no_name *sin = _sin;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -1128,8 +1155,9 @@ nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+ }
+ static __be32
+-nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
++nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, void *_setattr)
+ {
++      struct nfsd4_setattr *setattr = _setattr;
+       __be32 status;
+       status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
+@@ -1140,8 +1168,9 @@ nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *seta
+ }
+ static __be32
+-nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
++nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, void *_setclientid)
+ {
++      struct nfsd4_setclientid *setclientid = _setclientid;
+       DECODE_HEAD;
+       if (argp->minorversion >= 1)
+@@ -1170,8 +1199,9 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
+ }
+ static __be32
+-nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
++nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, void *_scd_c)
+ {
++      struct nfsd4_setclientid_confirm *scd_c = _scd_c;
+       DECODE_HEAD;
+       if (argp->minorversion >= 1)
+@@ -1186,8 +1216,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
+ /* Also used for NVERIFY */
+ static __be32
+-nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
++nfsd4_decode_verify(struct nfsd4_compoundargs *argp, void *_verify)
+ {
++      struct nfsd4_verify *verify = _verify;
+       DECODE_HEAD;
+       if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
+@@ -1205,8 +1236,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
+ }
+ static __be32
+-nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
++nfsd4_decode_write(struct nfsd4_compoundargs *argp, void *_write)
+ {
++      struct nfsd4_write *write = _write;
+       int avail;
+       int len;
+       DECODE_HEAD;
+@@ -1256,8 +1288,9 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+ }
+ static __be32
+-nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
++nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, void *_rlockowner)
+ {
++      struct nfsd4_release_lockowner *rlockowner = _rlockowner;
+       DECODE_HEAD;
+       if (argp->minorversion >= 1)
+@@ -1276,8 +1309,9 @@ nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_rel
+ static __be32
+ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
+-                       struct nfsd4_exchange_id *exid)
++                       void *_exid)
+ {
++      struct nfsd4_exchange_id *exid = _exid;
+       int dummy, tmp;
+       DECODE_HEAD;
+@@ -1378,8 +1412,9 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
+-                          struct nfsd4_create_session *sess)
++                          void *_sess)
+ {
++      struct nfsd4_create_session *sess = _sess;
+       DECODE_HEAD;
+       u32 dummy;
+@@ -1430,8 +1465,9 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
+-                           struct nfsd4_destroy_session *destroy_session)
++                           void *_destroy_session)
+ {
++      struct nfsd4_destroy_session *destroy_session = _destroy_session;
+       DECODE_HEAD;
+       READ_BUF(NFS4_MAX_SESSIONID_LEN);
+       COPYMEM(destroy_session->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+@@ -1441,8 +1477,9 @@ nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
+-                        struct nfsd4_free_stateid *free_stateid)
++                        void *_free_stateid)
+ {
++      struct nfsd4_free_stateid *free_stateid = _free_stateid;
+       DECODE_HEAD;
+       READ_BUF(sizeof(stateid_t));
+@@ -1454,8 +1491,9 @@ nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
+-                    struct nfsd4_sequence *seq)
++                    void *_seq)
+ {
++      struct nfsd4_sequence *seq = _seq;
+       DECODE_HEAD;
+       READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+@@ -1469,8 +1507,9 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
+ }
+ static __be32
+-nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
++nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, void *_test_stateid)
+ {
++      struct nfsd4_test_stateid *test_stateid = _test_stateid;
+       int i;
+       __be32 *p, status;
+       struct nfsd4_test_stateid_id *stateid;
+@@ -1504,8 +1543,9 @@ xdr_error:
+       goto out;
+ }
+-static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, struct nfsd4_destroy_clientid *dc)
++static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, void *_dc)
+ {
++      struct nfsd4_destroy_clientid *dc = _dc;
+       DECODE_HEAD;
+       READ_BUF(8);
+@@ -1514,8 +1554,9 @@ static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp, str
+       DECODE_TAIL;
+ }
+-static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
++static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, void *_rc)
+ {
++      struct nfsd4_reclaim_complete *rc = _rc;
+       DECODE_HEAD;
+       READ_BUF(4);
+@@ -1527,8 +1568,9 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, str
+ #ifdef CONFIG_NFSD_PNFS
+ static __be32
+ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+-              struct nfsd4_getdeviceinfo *gdev)
++              void *_gdev)
+ {
++      struct nfsd4_getdeviceinfo *gdev = _gdev;
+       DECODE_HEAD;
+       u32 num, i;
+@@ -1552,8 +1594,9 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
+-              struct nfsd4_layoutget *lgp)
++              void *_lgp)
+ {
++      struct nfsd4_layoutget *lgp = _lgp;
+       DECODE_HEAD;
+       READ_BUF(36);
+@@ -1576,8 +1619,9 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
+-              struct nfsd4_layoutcommit *lcp)
++              void *_lcp)
+ {
++      struct nfsd4_layoutcommit *lcp = _lcp;
+       DECODE_HEAD;
+       u32 timechange;
+@@ -1624,8 +1668,9 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
+-              struct nfsd4_layoutreturn *lrp)
++              void *_lrp)
+ {
++      struct nfsd4_layoutreturn *lrp = _lrp;
+       DECODE_HEAD;
+       READ_BUF(16);
+@@ -1659,8 +1704,9 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
+ static __be32
+ nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
+-                     struct nfsd4_fallocate *fallocate)
++                     void *_fallocate)
+ {
++      struct nfsd4_fallocate *fallocate = _fallocate;
+       DECODE_HEAD;
+       status = nfsd4_decode_stateid(argp, &fallocate->falloc_stateid);
+@@ -1675,8 +1721,9 @@ nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
+ }
+ static __be32
+-nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
++nfsd4_decode_clone(struct nfsd4_compoundargs *argp, void *_clone)
+ {
++      struct nfsd4_clone *clone = _clone;
+       DECODE_HEAD;
+       status = nfsd4_decode_stateid(argp, &clone->cl_src_stateid);
+@@ -1694,8 +1741,9 @@ nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
+ }
+ static __be32
+-nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
++nfsd4_decode_seek(struct nfsd4_compoundargs *argp, void *_seek)
+ {
++      struct nfsd4_seek *seek = _seek;
+       DECODE_HEAD;
+       status = nfsd4_decode_stateid(argp, &seek->seek_stateid);
+@@ -1723,88 +1771,88 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
+ typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
+-static nfsd4_dec nfsd4_dec_ops[] = {
+-      [OP_ACCESS]             = (nfsd4_dec)nfsd4_decode_access,
+-      [OP_CLOSE]              = (nfsd4_dec)nfsd4_decode_close,
+-      [OP_COMMIT]             = (nfsd4_dec)nfsd4_decode_commit,
+-      [OP_CREATE]             = (nfsd4_dec)nfsd4_decode_create,
+-      [OP_DELEGPURGE]         = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_DELEGRETURN]        = (nfsd4_dec)nfsd4_decode_delegreturn,
+-      [OP_GETATTR]            = (nfsd4_dec)nfsd4_decode_getattr,
+-      [OP_GETFH]              = (nfsd4_dec)nfsd4_decode_noop,
+-      [OP_LINK]               = (nfsd4_dec)nfsd4_decode_link,
+-      [OP_LOCK]               = (nfsd4_dec)nfsd4_decode_lock,
+-      [OP_LOCKT]              = (nfsd4_dec)nfsd4_decode_lockt,
+-      [OP_LOCKU]              = (nfsd4_dec)nfsd4_decode_locku,
+-      [OP_LOOKUP]             = (nfsd4_dec)nfsd4_decode_lookup,
+-      [OP_LOOKUPP]            = (nfsd4_dec)nfsd4_decode_noop,
+-      [OP_NVERIFY]            = (nfsd4_dec)nfsd4_decode_verify,
+-      [OP_OPEN]               = (nfsd4_dec)nfsd4_decode_open,
+-      [OP_OPENATTR]           = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_OPEN_CONFIRM]       = (nfsd4_dec)nfsd4_decode_open_confirm,
+-      [OP_OPEN_DOWNGRADE]     = (nfsd4_dec)nfsd4_decode_open_downgrade,
+-      [OP_PUTFH]              = (nfsd4_dec)nfsd4_decode_putfh,
+-      [OP_PUTPUBFH]           = (nfsd4_dec)nfsd4_decode_putpubfh,
+-      [OP_PUTROOTFH]          = (nfsd4_dec)nfsd4_decode_noop,
+-      [OP_READ]               = (nfsd4_dec)nfsd4_decode_read,
+-      [OP_READDIR]            = (nfsd4_dec)nfsd4_decode_readdir,
+-      [OP_READLINK]           = (nfsd4_dec)nfsd4_decode_noop,
+-      [OP_REMOVE]             = (nfsd4_dec)nfsd4_decode_remove,
+-      [OP_RENAME]             = (nfsd4_dec)nfsd4_decode_rename,
+-      [OP_RENEW]              = (nfsd4_dec)nfsd4_decode_renew,
+-      [OP_RESTOREFH]          = (nfsd4_dec)nfsd4_decode_noop,
+-      [OP_SAVEFH]             = (nfsd4_dec)nfsd4_decode_noop,
+-      [OP_SECINFO]            = (nfsd4_dec)nfsd4_decode_secinfo,
+-      [OP_SETATTR]            = (nfsd4_dec)nfsd4_decode_setattr,
+-      [OP_SETCLIENTID]        = (nfsd4_dec)nfsd4_decode_setclientid,
+-      [OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
+-      [OP_VERIFY]             = (nfsd4_dec)nfsd4_decode_verify,
+-      [OP_WRITE]              = (nfsd4_dec)nfsd4_decode_write,
+-      [OP_RELEASE_LOCKOWNER]  = (nfsd4_dec)nfsd4_decode_release_lockowner,
++static const nfsd4_dec nfsd4_dec_ops[] = {
++      [OP_ACCESS]             = nfsd4_decode_access,
++      [OP_CLOSE]              = nfsd4_decode_close,
++      [OP_COMMIT]             = nfsd4_decode_commit,
++      [OP_CREATE]             = nfsd4_decode_create,
++      [OP_DELEGPURGE]         = nfsd4_decode_notsupp,
++      [OP_DELEGRETURN]        = nfsd4_decode_delegreturn,
++      [OP_GETATTR]            = nfsd4_decode_getattr,
++      [OP_GETFH]              = nfsd4_decode_noop,
++      [OP_LINK]               = nfsd4_decode_link,
++      [OP_LOCK]               = nfsd4_decode_lock,
++      [OP_LOCKT]              = nfsd4_decode_lockt,
++      [OP_LOCKU]              = nfsd4_decode_locku,
++      [OP_LOOKUP]             = nfsd4_decode_lookup,
++      [OP_LOOKUPP]            = nfsd4_decode_noop,
++      [OP_NVERIFY]            = nfsd4_decode_verify,
++      [OP_OPEN]               = nfsd4_decode_open,
++      [OP_OPENATTR]           = nfsd4_decode_notsupp,
++      [OP_OPEN_CONFIRM]       = nfsd4_decode_open_confirm,
++      [OP_OPEN_DOWNGRADE]     = nfsd4_decode_open_downgrade,
++      [OP_PUTFH]              = nfsd4_decode_putfh,
++      [OP_PUTPUBFH]           = nfsd4_decode_putpubfh,
++      [OP_PUTROOTFH]          = nfsd4_decode_noop,
++      [OP_READ]               = nfsd4_decode_read,
++      [OP_READDIR]            = nfsd4_decode_readdir,
++      [OP_READLINK]           = nfsd4_decode_noop,
++      [OP_REMOVE]             = nfsd4_decode_remove,
++      [OP_RENAME]             = nfsd4_decode_rename,
++      [OP_RENEW]              = nfsd4_decode_renew,
++      [OP_RESTOREFH]          = nfsd4_decode_noop,
++      [OP_SAVEFH]             = nfsd4_decode_noop,
++      [OP_SECINFO]            = nfsd4_decode_secinfo,
++      [OP_SETATTR]            = nfsd4_decode_setattr,
++      [OP_SETCLIENTID]        = nfsd4_decode_setclientid,
++      [OP_SETCLIENTID_CONFIRM] = nfsd4_decode_setclientid_confirm,
++      [OP_VERIFY]             = nfsd4_decode_verify,
++      [OP_WRITE]              = nfsd4_decode_write,
++      [OP_RELEASE_LOCKOWNER]  = nfsd4_decode_release_lockowner,
+       /* new operations for NFSv4.1 */
+-      [OP_BACKCHANNEL_CTL]    = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
+-      [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
+-      [OP_EXCHANGE_ID]        = (nfsd4_dec)nfsd4_decode_exchange_id,
+-      [OP_CREATE_SESSION]     = (nfsd4_dec)nfsd4_decode_create_session,
+-      [OP_DESTROY_SESSION]    = (nfsd4_dec)nfsd4_decode_destroy_session,
+-      [OP_FREE_STATEID]       = (nfsd4_dec)nfsd4_decode_free_stateid,
+-      [OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
++      [OP_BACKCHANNEL_CTL]    = nfsd4_decode_backchannel_ctl,
++      [OP_BIND_CONN_TO_SESSION]= nfsd4_decode_bind_conn_to_session,
++      [OP_EXCHANGE_ID]        = nfsd4_decode_exchange_id,
++      [OP_CREATE_SESSION]     = nfsd4_decode_create_session,
++      [OP_DESTROY_SESSION]    = nfsd4_decode_destroy_session,
++      [OP_FREE_STATEID]       = nfsd4_decode_free_stateid,
++      [OP_GET_DIR_DELEGATION] = nfsd4_decode_notsupp,
+ #ifdef CONFIG_NFSD_PNFS
+-      [OP_GETDEVICEINFO]      = (nfsd4_dec)nfsd4_decode_getdeviceinfo,
+-      [OP_GETDEVICELIST]      = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_LAYOUTCOMMIT]       = (nfsd4_dec)nfsd4_decode_layoutcommit,
+-      [OP_LAYOUTGET]          = (nfsd4_dec)nfsd4_decode_layoutget,
+-      [OP_LAYOUTRETURN]       = (nfsd4_dec)nfsd4_decode_layoutreturn,
++      [OP_GETDEVICEINFO]      = nfsd4_decode_getdeviceinfo,
++      [OP_GETDEVICELIST]      = nfsd4_decode_notsupp,
++      [OP_LAYOUTCOMMIT]       = nfsd4_decode_layoutcommit,
++      [OP_LAYOUTGET]          = nfsd4_decode_layoutget,
++      [OP_LAYOUTRETURN]       = nfsd4_decode_layoutreturn,
+ #else
+-      [OP_GETDEVICEINFO]      = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_GETDEVICELIST]      = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_LAYOUTCOMMIT]       = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_LAYOUTGET]          = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_LAYOUTRETURN]       = (nfsd4_dec)nfsd4_decode_notsupp,
++      [OP_GETDEVICEINFO]      = nfsd4_decode_notsupp,
++      [OP_GETDEVICELIST]      = nfsd4_decode_notsupp,
++      [OP_LAYOUTCOMMIT]       = nfsd4_decode_notsupp,
++      [OP_LAYOUTGET]          = nfsd4_decode_notsupp,
++      [OP_LAYOUTRETURN]       = nfsd4_decode_notsupp,
+ #endif
+-      [OP_SECINFO_NO_NAME]    = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
+-      [OP_SEQUENCE]           = (nfsd4_dec)nfsd4_decode_sequence,
+-      [OP_SET_SSV]            = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_TEST_STATEID]       = (nfsd4_dec)nfsd4_decode_test_stateid,
+-      [OP_WANT_DELEGATION]    = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_DESTROY_CLIENTID]   = (nfsd4_dec)nfsd4_decode_destroy_clientid,
+-      [OP_RECLAIM_COMPLETE]   = (nfsd4_dec)nfsd4_decode_reclaim_complete,
++      [OP_SECINFO_NO_NAME]    = nfsd4_decode_secinfo_no_name,
++      [OP_SEQUENCE]           = nfsd4_decode_sequence,
++      [OP_SET_SSV]            = nfsd4_decode_notsupp,
++      [OP_TEST_STATEID]       = nfsd4_decode_test_stateid,
++      [OP_WANT_DELEGATION]    = nfsd4_decode_notsupp,
++      [OP_DESTROY_CLIENTID]   = nfsd4_decode_destroy_clientid,
++      [OP_RECLAIM_COMPLETE]   = nfsd4_decode_reclaim_complete,
+       /* new operations for NFSv4.2 */
+-      [OP_ALLOCATE]           = (nfsd4_dec)nfsd4_decode_fallocate,
+-      [OP_COPY]               = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_COPY_NOTIFY]        = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_DEALLOCATE]         = (nfsd4_dec)nfsd4_decode_fallocate,
+-      [OP_IO_ADVISE]          = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_LAYOUTERROR]        = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_LAYOUTSTATS]        = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_OFFLOAD_CANCEL]     = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_OFFLOAD_STATUS]     = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_READ_PLUS]          = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_SEEK]               = (nfsd4_dec)nfsd4_decode_seek,
+-      [OP_WRITE_SAME]         = (nfsd4_dec)nfsd4_decode_notsupp,
+-      [OP_CLONE]              = (nfsd4_dec)nfsd4_decode_clone,
++      [OP_ALLOCATE]           = nfsd4_decode_fallocate,
++      [OP_COPY]               = nfsd4_decode_notsupp,
++      [OP_COPY_NOTIFY]        = nfsd4_decode_notsupp,
++      [OP_DEALLOCATE]         = nfsd4_decode_fallocate,
++      [OP_IO_ADVISE]          = nfsd4_decode_notsupp,
++      [OP_LAYOUTERROR]        = nfsd4_decode_notsupp,
++      [OP_LAYOUTSTATS]        = nfsd4_decode_notsupp,
++      [OP_OFFLOAD_CANCEL]     = nfsd4_decode_notsupp,
++      [OP_OFFLOAD_STATUS]     = nfsd4_decode_notsupp,
++      [OP_READ_PLUS]          = nfsd4_decode_notsupp,
++      [OP_SEEK]               = nfsd4_decode_seek,
++      [OP_WRITE_SAME]         = nfsd4_decode_notsupp,
++      [OP_CLONE]              = nfsd4_decode_clone,
+ };
+ static inline bool
+@@ -3032,8 +3080,9 @@ nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+ }
+ static __be32
+-nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
++nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, void *_access)
+ {
++      struct nfsd4_access *access = _access;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3047,8 +3096,9 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+       return nfserr;
+ }
+-static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
++static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, void *_bcts)
+ {
++      struct nfsd4_bind_conn_to_session *bcts = _bcts;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3066,8 +3116,10 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
+ }
+ static __be32
+-nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
++nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, void *_close)
+ {
++      struct nfsd4_close *close = _close;
++
+       struct xdr_stream *xdr = &resp->xdr;
+       if (!nfserr)
+@@ -3078,8 +3130,9 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c
+ static __be32
+-nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
++nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, void *_commit)
+ {
++      struct nfsd4_commit *commit = _commit;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3094,8 +3147,9 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ }
+ static __be32
+-nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
++nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, void *_create)
+ {
++      struct nfsd4_create *create = _create;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3111,8 +3165,9 @@ nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ }
+ static __be32
+-nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr)
++nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, void *_getattr)
+ {
++      struct nfsd4_getattr *getattr = _getattr;
+       struct svc_fh *fhp = getattr->ga_fhp;
+       struct xdr_stream *xdr = &resp->xdr;
+@@ -3126,8 +3181,9 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ }
+ static __be32
+-nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp)
++nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, void *_fhpp)
+ {
++      struct svc_fh **fhpp = (struct svc_fh **)_fhpp;
+       struct xdr_stream *xdr = &resp->xdr;
+       struct svc_fh *fhp = *fhpp;
+       unsigned int len;
+@@ -3183,8 +3239,10 @@ again:
+ }
+ static __be32
+-nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
++nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, void *_lock)
+ {
++      struct nfsd4_lock *lock = _lock;
++
+       struct xdr_stream *xdr = &resp->xdr;
+       if (!nfserr)
+@@ -3196,8 +3254,9 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
+ }
+ static __be32
+-nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt)
++nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, void *_lockt)
+ {
++      struct nfsd4_lockt *lockt = _lockt;
+       struct xdr_stream *xdr = &resp->xdr;
+       if (nfserr == nfserr_denied)
+@@ -3206,8 +3265,9 @@ nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
+ }
+ static __be32
+-nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
++nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, void *_locku)
+ {
++      struct nfsd4_locku *locku = _locku;
+       struct xdr_stream *xdr = &resp->xdr;
+       if (!nfserr)
+@@ -3218,8 +3278,9 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
+ static __be32
+-nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
++nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, void *_link)
+ {
++      struct nfsd4_link *link = _link;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3234,8 +3295,9 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_li
+ static __be32
+-nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
++nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, void *_open)
+ {
++      struct nfsd4_open *open = _open;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3332,8 +3394,10 @@ out:
+ }
+ static __be32
+-nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
++nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, void *_oc)
+ {
++      struct nfsd4_open_confirm *oc = _oc;
++
+       struct xdr_stream *xdr = &resp->xdr;
+       if (!nfserr)
+@@ -3343,8 +3407,10 @@ nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct
+ }
+ static __be32
+-nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
++nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, void *_od)
+ {
++      struct nfsd4_open_downgrade *od = _od;
++
+       struct xdr_stream *xdr = &resp->xdr;
+       if (!nfserr)
+@@ -3477,9 +3543,9 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
+ }
+ static __be32
+-nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                struct nfsd4_read *read)
++nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, void *_read)
+ {
++      struct nfsd4_read *read = _read;
+       unsigned long maxcount;
+       struct xdr_stream *xdr = &resp->xdr;
+       struct file *file = read->rd_filp;
+@@ -3531,8 +3597,9 @@ out:
+ }
+ static __be32
+-nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
++nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, void *_readlink)
+ {
++      struct nfsd4_readlink *readlink = _readlink;
+       int maxcount;
+       __be32 wire_count;
+       int zero = 0;
+@@ -3576,8 +3643,9 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
+ }
+ static __be32
+-nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir)
++nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, void *_readdir)
+ {
++      struct nfsd4_readdir *readdir = _readdir;
+       int maxcount;
+       int bytes_left;
+       loff_t offset;
+@@ -3669,8 +3737,9 @@ err_no_verf:
+ }
+ static __be32
+-nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
++nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, void *_remove)
+ {
++      struct nfsd4_remove *remove = _remove;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3684,8 +3753,9 @@ nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ }
+ static __be32
+-nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
++nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, void *_rename)
+ {
++      struct nfsd4_rename *rename = _rename;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3701,8 +3771,9 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ static __be32
+ nfsd4_do_encode_secinfo(struct xdr_stream *xdr,
+-                       __be32 nfserr, struct svc_export *exp)
++                       __be32 nfserr, void *_exp)
+ {
++      struct svc_export *exp = _exp;
+       u32 i, nflavs, supported;
+       struct exp_flavor_info *flavs;
+       struct exp_flavor_info def_flavs[2];
+@@ -3777,8 +3848,9 @@ out:
+ static __be32
+ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                   struct nfsd4_secinfo *secinfo)
++                   void *_secinfo)
+ {
++      struct nfsd4_secinfo *secinfo = _secinfo;
+       struct xdr_stream *xdr = &resp->xdr;
+       return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->si_exp);
+@@ -3786,8 +3858,9 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ static __be32
+ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                   struct nfsd4_secinfo_no_name *secinfo)
++                   void *_secinfo)
+ {
++      struct nfsd4_secinfo_no_name *secinfo = _secinfo;
+       struct xdr_stream *xdr = &resp->xdr;
+       return nfsd4_do_encode_secinfo(xdr, nfserr, secinfo->sin_exp);
+@@ -3798,8 +3871,9 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+  * regardless of the error status.
+  */
+ static __be32
+-nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
++nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, void *_setattr)
+ {
++      struct nfsd4_setattr *setattr = _setattr;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3822,8 +3896,9 @@ nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ }
+ static __be32
+-nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
++nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, void *_scd)
+ {
++      struct nfsd4_setclientid *scd = _scd;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3846,8 +3921,9 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n
+ }
+ static __be32
+-nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
++nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, void *_write)
+ {
++      struct nfsd4_write *write = _write;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -3865,8 +3941,9 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
+ static __be32
+ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                       struct nfsd4_exchange_id *exid)
++                       void *_exid)
+ {
++      struct nfsd4_exchange_id *exid = _exid;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+       char *major_id;
+@@ -3948,8 +4025,9 @@ out:
+ static __be32
+ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                          struct nfsd4_create_session *sess)
++                          void *_sess)
+ {
++      struct nfsd4_create_session *sess = _sess;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -4004,8 +4082,9 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
+ static __be32
+ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                    struct nfsd4_sequence *seq)
++                    void *_seq)
+ {
++      struct nfsd4_sequence *seq = _seq;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -4030,8 +4109,9 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
+ static __be32
+ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                        struct nfsd4_test_stateid *test_stateid)
++                        void *_test_stateid)
+ {
++      struct nfsd4_test_stateid *test_stateid = _test_stateid;
+       struct xdr_stream *xdr = &resp->xdr;
+       struct nfsd4_test_stateid_id *stateid, *next;
+       __be32 *p;
+@@ -4053,9 +4133,9 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
+ #ifdef CONFIG_NFSD_PNFS
+ static __be32
+-nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+-              struct nfsd4_getdeviceinfo *gdev)
++nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr, void *_gdev)
+ {
++      struct nfsd4_getdeviceinfo *gdev = _gdev;
+       struct xdr_stream *xdr = &resp->xdr;
+       const struct nfsd4_layout_ops *ops =
+               nfsd4_layout_ops[gdev->gd_layout_type];
+@@ -4123,9 +4203,9 @@ toosmall:
+ }
+ static __be32
+-nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+-              struct nfsd4_layoutget *lgp)
++nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr, void *_lgp)
+ {
++      struct nfsd4_layoutget *lgp = _lgp;
+       struct xdr_stream *xdr = &resp->xdr;
+       const struct nfsd4_layout_ops *ops =
+               nfsd4_layout_ops[lgp->lg_layout_type];
+@@ -4158,9 +4238,9 @@ out:
+ }
+ static __be32
+-nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                        struct nfsd4_layoutcommit *lcp)
++nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr, void *_lcp)
+ {
++      struct nfsd4_layoutcommit *lcp = _lcp;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -4182,9 +4262,9 @@ nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+ }
+ static __be32
+-nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
+-              struct nfsd4_layoutreturn *lrp)
++nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr, void *_lrp)
+ {
++      struct nfsd4_layoutreturn *lrp = _lrp;
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+@@ -4203,8 +4283,9 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
+ static __be32
+ nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
+-                struct nfsd4_seek *seek)
++                void *_seek)
+ {
++      struct nfsd4_seek *seek= (struct nfsd4_seek *)_seek;
+       __be32 *p;
+       if (nfserr)
+@@ -4231,87 +4312,87 @@ typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
+  * done in the decoding phase.
+  */
+ static nfsd4_enc nfsd4_enc_ops[] = {
+-      [OP_ACCESS]             = (nfsd4_enc)nfsd4_encode_access,
+-      [OP_CLOSE]              = (nfsd4_enc)nfsd4_encode_close,
+-      [OP_COMMIT]             = (nfsd4_enc)nfsd4_encode_commit,
+-      [OP_CREATE]             = (nfsd4_enc)nfsd4_encode_create,
+-      [OP_DELEGPURGE]         = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_DELEGRETURN]        = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_GETATTR]            = (nfsd4_enc)nfsd4_encode_getattr,
+-      [OP_GETFH]              = (nfsd4_enc)nfsd4_encode_getfh,
+-      [OP_LINK]               = (nfsd4_enc)nfsd4_encode_link,
+-      [OP_LOCK]               = (nfsd4_enc)nfsd4_encode_lock,
+-      [OP_LOCKT]              = (nfsd4_enc)nfsd4_encode_lockt,
+-      [OP_LOCKU]              = (nfsd4_enc)nfsd4_encode_locku,
+-      [OP_LOOKUP]             = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LOOKUPP]            = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_NVERIFY]            = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_OPEN]               = (nfsd4_enc)nfsd4_encode_open,
+-      [OP_OPENATTR]           = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_OPEN_CONFIRM]       = (nfsd4_enc)nfsd4_encode_open_confirm,
+-      [OP_OPEN_DOWNGRADE]     = (nfsd4_enc)nfsd4_encode_open_downgrade,
+-      [OP_PUTFH]              = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_PUTPUBFH]           = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_PUTROOTFH]          = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_READ]               = (nfsd4_enc)nfsd4_encode_read,
+-      [OP_READDIR]            = (nfsd4_enc)nfsd4_encode_readdir,
+-      [OP_READLINK]           = (nfsd4_enc)nfsd4_encode_readlink,
+-      [OP_REMOVE]             = (nfsd4_enc)nfsd4_encode_remove,
+-      [OP_RENAME]             = (nfsd4_enc)nfsd4_encode_rename,
+-      [OP_RENEW]              = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_RESTOREFH]          = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_SAVEFH]             = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_SECINFO]            = (nfsd4_enc)nfsd4_encode_secinfo,
+-      [OP_SETATTR]            = (nfsd4_enc)nfsd4_encode_setattr,
+-      [OP_SETCLIENTID]        = (nfsd4_enc)nfsd4_encode_setclientid,
+-      [OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_VERIFY]             = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_WRITE]              = (nfsd4_enc)nfsd4_encode_write,
+-      [OP_RELEASE_LOCKOWNER]  = (nfsd4_enc)nfsd4_encode_noop,
++      [OP_ACCESS]             = nfsd4_encode_access,
++      [OP_CLOSE]              = nfsd4_encode_close,
++      [OP_COMMIT]             = nfsd4_encode_commit,
++      [OP_CREATE]             = nfsd4_encode_create,
++      [OP_DELEGPURGE]         = nfsd4_encode_noop,
++      [OP_DELEGRETURN]        = nfsd4_encode_noop,
++      [OP_GETATTR]            = nfsd4_encode_getattr,
++      [OP_GETFH]              = nfsd4_encode_getfh,
++      [OP_LINK]               = nfsd4_encode_link,
++      [OP_LOCK]               = nfsd4_encode_lock,
++      [OP_LOCKT]              = nfsd4_encode_lockt,
++      [OP_LOCKU]              = nfsd4_encode_locku,
++      [OP_LOOKUP]             = nfsd4_encode_noop,
++      [OP_LOOKUPP]            = nfsd4_encode_noop,
++      [OP_NVERIFY]            = nfsd4_encode_noop,
++      [OP_OPEN]               = nfsd4_encode_open,
++      [OP_OPENATTR]           = nfsd4_encode_noop,
++      [OP_OPEN_CONFIRM]       = nfsd4_encode_open_confirm,
++      [OP_OPEN_DOWNGRADE]     = nfsd4_encode_open_downgrade,
++      [OP_PUTFH]              = nfsd4_encode_noop,
++      [OP_PUTPUBFH]           = nfsd4_encode_noop,
++      [OP_PUTROOTFH]          = nfsd4_encode_noop,
++      [OP_READ]               = nfsd4_encode_read,
++      [OP_READDIR]            = nfsd4_encode_readdir,
++      [OP_READLINK]           = nfsd4_encode_readlink,
++      [OP_REMOVE]             = nfsd4_encode_remove,
++      [OP_RENAME]             = nfsd4_encode_rename,
++      [OP_RENEW]              = nfsd4_encode_noop,
++      [OP_RESTOREFH]          = nfsd4_encode_noop,
++      [OP_SAVEFH]             = nfsd4_encode_noop,
++      [OP_SECINFO]            = nfsd4_encode_secinfo,
++      [OP_SETATTR]            = nfsd4_encode_setattr,
++      [OP_SETCLIENTID]        = nfsd4_encode_setclientid,
++      [OP_SETCLIENTID_CONFIRM] = nfsd4_encode_noop,
++      [OP_VERIFY]             = nfsd4_encode_noop,
++      [OP_WRITE]              = nfsd4_encode_write,
++      [OP_RELEASE_LOCKOWNER]  = nfsd4_encode_noop,
+       /* NFSv4.1 operations */
+-      [OP_BACKCHANNEL_CTL]    = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
+-      [OP_EXCHANGE_ID]        = (nfsd4_enc)nfsd4_encode_exchange_id,
+-      [OP_CREATE_SESSION]     = (nfsd4_enc)nfsd4_encode_create_session,
+-      [OP_DESTROY_SESSION]    = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_FREE_STATEID]       = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
++      [OP_BACKCHANNEL_CTL]    = nfsd4_encode_noop,
++      [OP_BIND_CONN_TO_SESSION] = nfsd4_encode_bind_conn_to_session,
++      [OP_EXCHANGE_ID]        = nfsd4_encode_exchange_id,
++      [OP_CREATE_SESSION]     = nfsd4_encode_create_session,
++      [OP_DESTROY_SESSION]    = nfsd4_encode_noop,
++      [OP_FREE_STATEID]       = nfsd4_encode_noop,
++      [OP_GET_DIR_DELEGATION] = nfsd4_encode_noop,
+ #ifdef CONFIG_NFSD_PNFS
+-      [OP_GETDEVICEINFO]      = (nfsd4_enc)nfsd4_encode_getdeviceinfo,
+-      [OP_GETDEVICELIST]      = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LAYOUTCOMMIT]       = (nfsd4_enc)nfsd4_encode_layoutcommit,
+-      [OP_LAYOUTGET]          = (nfsd4_enc)nfsd4_encode_layoutget,
+-      [OP_LAYOUTRETURN]       = (nfsd4_enc)nfsd4_encode_layoutreturn,
++      [OP_GETDEVICEINFO]      = nfsd4_encode_getdeviceinfo,
++      [OP_GETDEVICELIST]      = nfsd4_encode_noop,
++      [OP_LAYOUTCOMMIT]       = nfsd4_encode_layoutcommit,
++      [OP_LAYOUTGET]          = nfsd4_encode_layoutget,
++      [OP_LAYOUTRETURN]       = nfsd4_encode_layoutreturn,
+ #else
+-      [OP_GETDEVICEINFO]      = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_GETDEVICELIST]      = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LAYOUTCOMMIT]       = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LAYOUTGET]          = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LAYOUTRETURN]       = (nfsd4_enc)nfsd4_encode_noop,
++      [OP_GETDEVICEINFO]      = nfsd4_encode_noop,
++      [OP_GETDEVICELIST]      = nfsd4_encode_noop,
++      [OP_LAYOUTCOMMIT]       = nfsd4_encode_noop,
++      [OP_LAYOUTGET]          = nfsd4_encode_noop,
++      [OP_LAYOUTRETURN]       = nfsd4_encode_noop,
+ #endif
+-      [OP_SECINFO_NO_NAME]    = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
+-      [OP_SEQUENCE]           = (nfsd4_enc)nfsd4_encode_sequence,
+-      [OP_SET_SSV]            = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_TEST_STATEID]       = (nfsd4_enc)nfsd4_encode_test_stateid,
+-      [OP_WANT_DELEGATION]    = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_DESTROY_CLIENTID]   = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_RECLAIM_COMPLETE]   = (nfsd4_enc)nfsd4_encode_noop,
++      [OP_SECINFO_NO_NAME]    = nfsd4_encode_secinfo_no_name,
++      [OP_SEQUENCE]           = nfsd4_encode_sequence,
++      [OP_SET_SSV]            = nfsd4_encode_noop,
++      [OP_TEST_STATEID]       = nfsd4_encode_test_stateid,
++      [OP_WANT_DELEGATION]    = nfsd4_encode_noop,
++      [OP_DESTROY_CLIENTID]   = nfsd4_encode_noop,
++      [OP_RECLAIM_COMPLETE]   = nfsd4_encode_noop,
+       /* NFSv4.2 operations */
+-      [OP_ALLOCATE]           = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_COPY]               = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_COPY_NOTIFY]        = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_DEALLOCATE]         = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_IO_ADVISE]          = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LAYOUTERROR]        = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_LAYOUTSTATS]        = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_OFFLOAD_CANCEL]     = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_OFFLOAD_STATUS]     = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_READ_PLUS]          = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_SEEK]               = (nfsd4_enc)nfsd4_encode_seek,
+-      [OP_WRITE_SAME]         = (nfsd4_enc)nfsd4_encode_noop,
+-      [OP_CLONE]              = (nfsd4_enc)nfsd4_encode_noop,
++      [OP_ALLOCATE]           = nfsd4_encode_noop,
++      [OP_COPY]               = nfsd4_encode_noop,
++      [OP_COPY_NOTIFY]        = nfsd4_encode_noop,
++      [OP_DEALLOCATE]         = nfsd4_encode_noop,
++      [OP_IO_ADVISE]          = nfsd4_encode_noop,
++      [OP_LAYOUTERROR]        = nfsd4_encode_noop,
++      [OP_LAYOUTSTATS]        = nfsd4_encode_noop,
++      [OP_OFFLOAD_CANCEL]     = nfsd4_encode_noop,
++      [OP_OFFLOAD_STATUS]     = nfsd4_encode_noop,
++      [OP_READ_PLUS]          = nfsd4_encode_noop,
++      [OP_SEEK]               = nfsd4_encode_seek,
++      [OP_WRITE_SAME]         = nfsd4_encode_noop,
++      [OP_CLONE]              = nfsd4_encode_noop,
+ };
+ /*
+@@ -4436,9 +4517,9 @@ nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
+ }
+ int
+-nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nfs4svc_encode_voidres(void *rqstp, __be32 *p, void *dummy)
+ {
+-        return xdr_ressize_check(rqstp, p);
++      return xdr_ressize_check(rqstp, p);
+ }
+ int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
+@@ -4461,8 +4542,11 @@ int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
+ }
+ int
+-nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args)
++nfs4svc_decode_compoundargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd4_compoundargs *args = _args;
++
+       if (rqstp->rq_arg.head[0].iov_len % 4) {
+               /* client is nuts */
+               dprintk("%s: compound not properly padded! (peeraddr=%pISc xid=0x%x)",
+@@ -4482,11 +4566,13 @@ nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_comp
+ }
+ int
+-nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundres *resp)
++nfs4svc_encode_compoundres(void *_rqstp, __be32 *p, void *_resp)
+ {
+       /*
+        * All that remains is to write the tag and operation count...
+        */
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd4_compoundres *resp = _resp;
+       struct xdr_buf *buf = resp->xdr.buf;
+       WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len +
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 54cde9a..ff5756c 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -513,7 +513,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+       struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
+       u32             hash;
+       struct nfsd_drc_bucket *b;
+-      int             len;
++      long            len;
+       size_t          bufsize = 0;
+       if (!rp)
+@@ -522,11 +522,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+       hash = nfsd_cache_hash(rp->c_xid);
+       b = &drc_hashtbl[hash];
+-      len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
+-      len >>= 2;
++      if (statp) {
++              len = (char*)statp - (char*)resv->iov_base;
++              len = resv->iov_len - len;
++              len >>= 2;
++      }
+       /* Don't cache excessive amounts of data and XDR failures */
+-      if (!statp || len > (256 >> 2)) {
++      if (!statp || len > (256 >> 2) || len < 0) {
+               nfsd_reply_cache_free(b, rp);
+               return;
+       }
+@@ -534,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+       switch (cachetype) {
+       case RC_REPLSTAT:
+               if (len != 1)
+-                      printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
++                      printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
+               rp->c_replstat = *statp;
+               break;
+       case RC_REPLBUFF:
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index e921476..fcda44c 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -39,9 +39,11 @@ nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp)
+  * N.B. After this call resp->fh needs an fh_put
+  */
+ static __be32
+-nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
+-                                        struct nfsd_attrstat *resp)
++nfsd_proc_getattr(struct svc_rqst *rqstp, void *_argp,
++                                        void *_resp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++      struct nfsd_attrstat *resp = _resp;
+       __be32 nfserr;
+       dprintk("nfsd: GETATTR  %s\n", SVCFH_fmt(&argp->fh));
+@@ -56,9 +58,11 @@ nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
+  * N.B. After this call resp->fh needs an fh_put
+  */
+ static __be32
+-nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
+-                                        struct nfsd_attrstat  *resp)
++nfsd_proc_setattr(struct svc_rqst *rqstp, void *_argp,
++                                        void *_resp)
+ {
++      struct nfsd_sattrargs *argp = _argp;
++      struct nfsd_attrstat *resp = _resp;
+       struct iattr *iap = &argp->attrs;
+       struct svc_fh *fhp;
+       __be32 nfserr;
+@@ -124,9 +128,11 @@ done:
+  * N.B. After this call resp->fh needs an fh_put
+  */
+ static __be32
+-nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+-                                       struct nfsd_diropres  *resp)
++nfsd_proc_lookup(struct svc_rqst *rqstp, void *_argp,
++                                       void *_resp)
+ {
++      struct nfsd_diropargs *argp = _argp;
++      struct nfsd_diropres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: LOOKUP   %s %.*s\n",
+@@ -144,9 +150,11 @@ nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+  * Read a symlink.
+  */
+ static __be32
+-nfsd_proc_readlink(struct svc_rqst *rqstp, struct nfsd_readlinkargs *argp,
+-                                         struct nfsd_readlinkres *resp)
++nfsd_proc_readlink(struct svc_rqst *rqstp, void *_argp,
++                                         void *_resp)
+ {
++      struct nfsd_readlinkargs *argp = _argp;
++      struct nfsd_readlinkres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: READLINK %s\n", SVCFH_fmt(&argp->fh));
+@@ -164,9 +172,11 @@ nfsd_proc_readlink(struct svc_rqst *rqstp, struct nfsd_readlinkargs *argp,
+  * N.B. After this call resp->fh needs an fh_put
+  */
+ static __be32
+-nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
+-                                     struct nfsd_readres  *resp)
++nfsd_proc_read(struct svc_rqst *rqstp, void *_argp,
++                                     void *_resp)
+ {
++      struct nfsd_readargs *argp = _argp;
++      struct nfsd_readres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: READ    %s %d bytes at %d\n",
+@@ -202,9 +212,11 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
+  * N.B. After this call resp->fh needs an fh_put
+  */
+ static __be32
+-nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
+-                                      struct nfsd_attrstat  *resp)
++nfsd_proc_write(struct svc_rqst *rqstp, void *_argp,
++                                      void *_resp)
+ {
++      struct nfsd_writeargs *argp = _argp;
++      struct nfsd_attrstat *resp = _resp;
+       __be32  nfserr;
+       int     stable = 1;
+       unsigned long cnt = argp->len;
+@@ -228,9 +240,11 @@ nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
+  * N.B. After this call _both_ argp->fh and resp->fh need an fh_put
+  */
+ static __be32
+-nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
+-                                       struct nfsd_diropres   *resp)
++nfsd_proc_create(struct svc_rqst *rqstp, void *_argp,
++                                       void *_resp)
+ {
++      struct nfsd_createargs *argp = _argp;
++      struct nfsd_diropres  *resp = _resp;
+       svc_fh          *dirfhp = &argp->fh;
+       svc_fh          *newfhp = &resp->fh;
+       struct iattr    *attr = &argp->attrs;
+@@ -383,9 +397,10 @@ done:
+ }
+ static __be32
+-nfsd_proc_remove(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
++nfsd_proc_remove(struct svc_rqst *rqstp, void *_argp,
+                                        void                  *resp)
+ {
++      struct nfsd_diropargs *argp = _argp;
+       __be32  nfserr;
+       dprintk("nfsd: REMOVE   %s %.*s\n", SVCFH_fmt(&argp->fh),
+@@ -398,9 +413,10 @@ nfsd_proc_remove(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+ }
+ static __be32
+-nfsd_proc_rename(struct svc_rqst *rqstp, struct nfsd_renameargs *argp,
++nfsd_proc_rename(struct svc_rqst *rqstp, void *_argp,
+                                        void                   *resp)
+ {
++      struct nfsd_renameargs *argp = _argp;
+       __be32  nfserr;
+       dprintk("nfsd: RENAME   %s %.*s -> \n",
+@@ -416,9 +432,10 @@ nfsd_proc_rename(struct svc_rqst *rqstp, struct nfsd_renameargs *argp,
+ }
+ static __be32
+-nfsd_proc_link(struct svc_rqst *rqstp, struct nfsd_linkargs *argp,
++nfsd_proc_link(struct svc_rqst *rqstp, void *_argp,
+                               void                        *resp)
+ {
++      struct nfsd_linkargs *argp = _argp;
+       __be32  nfserr;
+       dprintk("nfsd: LINK     %s ->\n",
+@@ -436,9 +453,10 @@ nfsd_proc_link(struct svc_rqst *rqstp, struct nfsd_linkargs *argp,
+ }
+ static __be32
+-nfsd_proc_symlink(struct svc_rqst *rqstp, struct nfsd_symlinkargs *argp,
++nfsd_proc_symlink(struct svc_rqst *rqstp, void *_argp,
+                                         void                    *resp)
+ {
++      struct nfsd_symlinkargs *argp = _argp;
+       struct svc_fh   newfh;
+       __be32          nfserr;
+@@ -466,9 +484,11 @@ nfsd_proc_symlink(struct svc_rqst *rqstp, struct nfsd_symlinkargs *argp,
+  * N.B. After this call resp->fh needs an fh_put
+  */
+ static __be32
+-nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
+-                                      struct nfsd_diropres   *resp)
++nfsd_proc_mkdir(struct svc_rqst *rqstp, void *_argp,
++                                      void *_resp)
+ {
++      struct nfsd_createargs *argp = _argp;
++      struct nfsd_diropres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: MKDIR    %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
+@@ -490,9 +510,10 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
+  * Remove a directory
+  */
+ static __be32
+-nfsd_proc_rmdir(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
++nfsd_proc_rmdir(struct svc_rqst *rqstp, void *_argp,
+                                       void                  *resp)
+ {
++      struct nfsd_diropargs *argp = _argp;
+       __be32  nfserr;
+       dprintk("nfsd: RMDIR    %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
+@@ -506,9 +527,11 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
+  * Read a portion of a directory.
+  */
+ static __be32
+-nfsd_proc_readdir(struct svc_rqst *rqstp, struct nfsd_readdirargs *argp,
+-                                        struct nfsd_readdirres  *resp)
++nfsd_proc_readdir(struct svc_rqst *rqstp, void *_argp,
++                                        void *_resp)
+ {
++      struct nfsd_readdirargs *argp = _argp;
++      struct nfsd_readdirres  *resp = _resp;
+       int             count;
+       __be32          nfserr;
+       loff_t          offset;
+@@ -546,9 +569,11 @@ nfsd_proc_readdir(struct svc_rqst *rqstp, struct nfsd_readdirargs *argp,
+  * Get file system info
+  */
+ static __be32
+-nfsd_proc_statfs(struct svc_rqst * rqstp, struct nfsd_fhandle   *argp,
+-                                        struct nfsd_statfsres *resp)
++nfsd_proc_statfs(struct svc_rqst * rqstp, void *_argp,
++                                        void *_resp)
+ {
++      struct nfsd_fhandle *argp = _argp;
++      struct nfsd_statfsres *resp = _resp;
+       __be32  nfserr;
+       dprintk("nfsd: STATFS   %s\n", SVCFH_fmt(&argp->fh));
+@@ -571,166 +596,166 @@ struct nfsd_void { int dummy; };
+ static struct svc_procedure           nfsd_procedures2[18] = {
+       [NFSPROC_NULL] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_null,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_func = nfsd_proc_null,
++              .pc_decode = nfssvc_decode_void,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_void),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_GETATTR] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_getattr,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_getattr,
++              .pc_decode = nfssvc_decode_fhandle,
++              .pc_encode = nfssvc_encode_attrstat,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_fhandle),
+               .pc_ressize = sizeof(struct nfsd_attrstat),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+AT,
+       },
+       [NFSPROC_SETATTR] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_setattr,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_sattrargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_setattr,
++              .pc_decode = nfssvc_decode_sattrargs,
++              .pc_encode = nfssvc_encode_attrstat,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_sattrargs),
+               .pc_ressize = sizeof(struct nfsd_attrstat),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+AT,
+       },
+       [NFSPROC_ROOT] = {
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_decode = nfssvc_decode_void,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_void),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_LOOKUP] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_lookup,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_lookup,
++              .pc_decode = nfssvc_decode_diropargs,
++              .pc_encode = nfssvc_encode_diropres,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_diropargs),
+               .pc_ressize = sizeof(struct nfsd_diropres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+FH+AT,
+       },
+       [NFSPROC_READLINK] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_readlink,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_readlinkargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_readlinkres,
++              .pc_func = nfsd_proc_readlink,
++              .pc_decode = nfssvc_decode_readlinkargs,
++              .pc_encode = nfssvc_encode_readlinkres,
+               .pc_argsize = sizeof(struct nfsd_readlinkargs),
+               .pc_ressize = sizeof(struct nfsd_readlinkres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
+       },
+       [NFSPROC_READ] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_read,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_readargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_readres,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_read,
++              .pc_decode = nfssvc_decode_readargs,
++              .pc_encode = nfssvc_encode_readres,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_readargs),
+               .pc_ressize = sizeof(struct nfsd_readres),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+       },
+       [NFSPROC_WRITECACHE] = {
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_decode = nfssvc_decode_void,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_void),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_NOCACHE,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_WRITE] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_write,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_writeargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_write,
++              .pc_decode = nfssvc_decode_writeargs,
++              .pc_encode = nfssvc_encode_attrstat,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_writeargs),
+               .pc_ressize = sizeof(struct nfsd_attrstat),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+AT,
+       },
+       [NFSPROC_CREATE] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_create,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_create,
++              .pc_decode = nfssvc_decode_createargs,
++              .pc_encode = nfssvc_encode_diropres,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_createargs),
+               .pc_ressize = sizeof(struct nfsd_diropres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+FH+AT,
+       },
+       [NFSPROC_REMOVE] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_remove,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_func = nfsd_proc_remove,
++              .pc_decode = nfssvc_decode_diropargs,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_diropargs),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_REPLSTAT,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_RENAME] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_rename,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_renameargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_func = nfsd_proc_rename,
++              .pc_decode = nfssvc_decode_renameargs,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_renameargs),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_REPLSTAT,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_LINK] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_link,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_linkargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_func = nfsd_proc_link,
++              .pc_decode = nfssvc_decode_linkargs,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_linkargs),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_REPLSTAT,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_SYMLINK] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_symlink,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_symlinkargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_func = nfsd_proc_symlink,
++              .pc_decode = nfssvc_decode_symlinkargs,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_symlinkargs),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_REPLSTAT,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_MKDIR] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_mkdir,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+-              .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
++              .pc_func = nfsd_proc_mkdir,
++              .pc_decode = nfssvc_decode_createargs,
++              .pc_encode = nfssvc_encode_diropres,
++              .pc_release = nfssvc_release_fhandle,
+               .pc_argsize = sizeof(struct nfsd_createargs),
+               .pc_ressize = sizeof(struct nfsd_diropres),
+               .pc_cachetype = RC_REPLBUFF,
+               .pc_xdrressize = ST+FH+AT,
+       },
+       [NFSPROC_RMDIR] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_rmdir,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_void,
++              .pc_func = nfsd_proc_rmdir,
++              .pc_decode = nfssvc_decode_diropargs,
++              .pc_encode = nfssvc_encode_void,
+               .pc_argsize = sizeof(struct nfsd_diropargs),
+               .pc_ressize = sizeof(struct nfsd_void),
+               .pc_cachetype = RC_REPLSTAT,
+               .pc_xdrressize = ST,
+       },
+       [NFSPROC_READDIR] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_readdir,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_readdirargs,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_readdirres,
++              .pc_func = nfsd_proc_readdir,
++              .pc_decode = nfssvc_decode_readdirargs,
++              .pc_encode = nfssvc_encode_readdirres,
+               .pc_argsize = sizeof(struct nfsd_readdirargs),
+               .pc_ressize = sizeof(struct nfsd_readdirres),
+               .pc_cachetype = RC_NOCACHE,
+       },
+       [NFSPROC_STATFS] = {
+-              .pc_func = (svc_procfunc) nfsd_proc_statfs,
+-              .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+-              .pc_encode = (kxdrproc_t) nfssvc_encode_statfsres,
++              .pc_func = nfsd_proc_statfs,
++              .pc_decode = nfssvc_decode_fhandle,
++              .pc_encode = nfssvc_encode_statfsres,
+               .pc_argsize = sizeof(struct nfsd_fhandle),
+               .pc_ressize = sizeof(struct nfsd_statfsres),
+               .pc_cachetype = RC_NOCACHE,
+diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
+index 41b468a..44e3e32 100644
+--- a/fs/nfsd/nfsxdr.c
++++ b/fs/nfsd/nfsxdr.c
+@@ -206,14 +206,16 @@ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *f
+  * XDR decode functions
+  */
+ int
+-nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nfssvc_decode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_argsize_check(rqstp, p);
+ }
+ int
+-nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args)
++nfssvc_decode_fhandle(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_fhandle *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -221,9 +223,10 @@ nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *ar
+ }
+ int
+-nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_sattrargs *args)
++nfssvc_decode_sattrargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_sattrargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -233,9 +236,10 @@ nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_diropargs *args)
++nfssvc_decode_diropargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_diropargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->fh))
+        || !(p = decode_filename(p, &args->name, &args->len)))
+               return 0;
+@@ -244,9 +248,10 @@ nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_readargs *args)
++nfssvc_decode_readargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd_readargs *args = _args;
+       unsigned int len;
+       int v;
+       p = decode_fh(p, &args->fh);
+@@ -276,9 +281,10 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_writeargs *args)
++nfssvc_decode_writeargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd_writeargs *args = _args;
+       unsigned int len, hdr, dlen;
+       int v;
+@@ -330,9 +336,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_createargs *args)
++nfssvc_decode_createargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_createargs *args = _args;
++
+       if (   !(p = decode_fh(p, &args->fh))
+           || !(p = decode_filename(p, &args->name, &args->len)))
+               return 0;
+@@ -342,9 +349,10 @@ nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_renameargs *args)
++nfssvc_decode_renameargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_renameargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->ffh))
+        || !(p = decode_filename(p, &args->fname, &args->flen))
+        || !(p = decode_fh(p, &args->tfh))
+@@ -355,8 +363,11 @@ nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkargs *args)
++nfssvc_decode_readlinkargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd_readlinkargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -366,9 +377,10 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
+ }
+ int
+-nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_linkargs *args)
++nfssvc_decode_linkargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_linkargs *args = _args;
++
+       if (!(p = decode_fh(p, &args->ffh))
+        || !(p = decode_fh(p, &args->tfh))
+        || !(p = decode_filename(p, &args->tname, &args->tlen)))
+@@ -378,9 +390,10 @@ nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_symlinkargs *args)
++nfssvc_decode_symlinkargs(void *rqstp, __be32 *p, void *_args)
+ {
++      struct nfsd_symlinkargs *args = _args;
++
+       if (   !(p = decode_fh(p, &args->ffh))
+           || !(p = decode_filename(p, &args->fname, &args->flen))
+           || !(p = decode_pathname(p, &args->tname, &args->tlen)))
+@@ -391,9 +404,11 @@ nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_readdirargs *args)
++nfssvc_decode_readdirargs(void *_rqstp, __be32 *p, void *_args)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd_readdirargs *args = _args;
++
+       p = decode_fh(p, &args->fh);
+       if (!p)
+               return 0;
+@@ -409,32 +424,36 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
+  * XDR encode functions
+  */
+ int
+-nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
++nfssvc_encode_void(void *rqstp, __be32 *p, void *dummy)
+ {
+       return xdr_ressize_check(rqstp, p);
+ }
+ int
+-nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_attrstat *resp)
++nfssvc_encode_attrstat(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd_diropres *resp = _resp;
++
+       p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
+       return xdr_ressize_check(rqstp, p);
+ }
+ int
+-nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_diropres *resp)
++nfssvc_encode_diropres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd_diropres *resp = _resp;
++
+       p = encode_fh(p, &resp->fh);
+       p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
+       return xdr_ressize_check(rqstp, p);
+ }
+ int
+-nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_readlinkres *resp)
++nfssvc_encode_readlinkres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp= _rqstp;
++      struct nfsd_readlinkres *resp = _resp;
++
+       *p++ = htonl(resp->len);
+       xdr_ressize_check(rqstp, p);
+       rqstp->rq_res.page_len = resp->len;
+@@ -448,9 +467,11 @@ nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_readres *resp)
++nfssvc_encode_readres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd_readres *resp = _resp;
++
+       p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
+       *p++ = htonl(resp->count);
+       xdr_ressize_check(rqstp, p);
+@@ -467,9 +488,11 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_readdirres *resp)
++nfssvc_encode_readdirres(void *_rqstp, __be32 *p, void *_resp)
+ {
++      struct svc_rqst *rqstp = _rqstp;
++      struct nfsd_readdirres *resp = _resp;
++
+       xdr_ressize_check(rqstp, p);
+       p = resp->buffer;
+       *p++ = 0;                       /* no more entries */
+@@ -480,9 +503,9 @@ nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
+ }
+ int
+-nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_statfsres *resp)
++nfssvc_encode_statfsres(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd_statfsres *resp = _resp;
+       struct kstatfs  *stat = &resp->stats;
+       *p++ = htonl(NFSSVC_MAXBLKSIZE_V2);     /* max transfer size */
+@@ -542,9 +565,10 @@ nfssvc_encode_entry(void *ccdv, const char *name,
+  * XDR release functions
+  */
+ int
+-nfssvc_release_fhandle(struct svc_rqst *rqstp, __be32 *p,
+-                                      struct nfsd_fhandle *resp)
++nfssvc_release_fhandle(void *rqstp, __be32 *p, void *_resp)
+ {
++      struct nfsd_fhandle *resp = _resp;
++
+       fh_put(&resp->fh);
+       return 1;
+ }
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index ff476e6..08ef362 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -870,7 +870,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+-      host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset, 0);
++      host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset, 0);
+       set_fs(oldfs);
+       return nfsd_finish_read(file, count, host_err);
+ }
+@@ -960,7 +960,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+       /* Write the data. */
+       oldfs = get_fs(); set_fs(KERNEL_DS);
+-      host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, flags);
++      host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos, flags);
+       set_fs(oldfs);
+       if (host_err < 0)
+               goto out_nfserr;
+@@ -1459,7 +1459,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+        */
+       oldfs = get_fs(); set_fs(KERNEL_DS);
+-      host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
++      host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
+       set_fs(oldfs);
+       if (host_err < 0)
+diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h
+index 4f0481d..193c8e7 100644
+--- a/fs/nfsd/xdr.h
++++ b/fs/nfsd/xdr.h
+@@ -131,40 +131,30 @@ union nfsd_xdrstore {
+ #define NFS2_SVC_XDRSIZE      sizeof(union nfsd_xdrstore)
+-int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *);
+-int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
+-int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_sattrargs *);
+-int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_diropargs *);
+-int nfssvc_decode_readargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_readargs *);
+-int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_writeargs *);
+-int nfssvc_decode_createargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_createargs *);
+-int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_renameargs *);
+-int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_readlinkargs *);
+-int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_linkargs *);
+-int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_symlinkargs *);
+-int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd_readdirargs *);
+-int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *);
+-int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *);
+-int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *);
+-int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *);
+-int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *);
+-int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *);
+-int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *);
++int nfssvc_decode_void(void *, __be32 *, void *);
++int nfssvc_decode_fhandle(void *, __be32 *, void *);
++int nfssvc_decode_sattrargs(void *, __be32 *, void *);
++int nfssvc_decode_diropargs(void *, __be32 *, void *);
++int nfssvc_decode_readargs(void *, __be32 *, void *);
++int nfssvc_decode_writeargs(void *, __be32 *, void *);
++int nfssvc_decode_createargs(void *, __be32 *, void *);
++int nfssvc_decode_renameargs(void *, __be32 *, void *);
++int nfssvc_decode_readlinkargs(void *, __be32 *, void *);
++int nfssvc_decode_linkargs(void *, __be32 *, void *);
++int nfssvc_decode_symlinkargs(void *, __be32 *, void *);
++int nfssvc_decode_readdirargs(void *, __be32 *, void *);
++int nfssvc_encode_void(void *, __be32 *, void *);
++int nfssvc_encode_attrstat(void *, __be32 *, void *);
++int nfssvc_encode_diropres(void *, __be32 *, void *);
++int nfssvc_encode_readlinkres(void *, __be32 *, void *);
++int nfssvc_encode_readres(void *, __be32 *, void *);
++int nfssvc_encode_statfsres(void *, __be32 *, void *);
++int nfssvc_encode_readdirres(void *, __be32 *, void *);
+ int nfssvc_encode_entry(void *, const char *name,
+                       int namlen, loff_t offset, u64 ino, unsigned int);
+-int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
++int nfssvc_release_fhandle(void *, __be32 *, void *);
+ /* Helper functions for NFSv2 ACL code */
+ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
+diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
+index 335e04a..d77a9c9 100644
+--- a/fs/nfsd/xdr3.h
++++ b/fs/nfsd/xdr3.h
+@@ -269,71 +269,41 @@ union nfsd3_xdrstore {
+ #define NFS3_SVC_XDRSIZE              sizeof(union nfsd3_xdrstore)
+-int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
+-int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_sattrargs *);
+-int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_diropargs *);
+-int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_accessargs *);
+-int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_readargs *);
+-int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_writeargs *);
+-int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_createargs *);
+-int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_createargs *);
+-int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_mknodargs *);
+-int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_renameargs *);
+-int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_readlinkargs *);
+-int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_linkargs *);
+-int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_symlinkargs *);
+-int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_readdirargs *);
+-int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_readdirargs *);
+-int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_commitargs *);
+-int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
+-int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_attrstat *);
+-int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_attrstat *);
+-int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_diropres *);
+-int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_accessres *);
+-int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_readlinkres *);
+-int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *);
+-int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *);
+-int nfs3svc_encode_createres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_diropres *);
+-int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_renameres *);
+-int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_linkres *);
+-int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_readdirres *);
+-int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_fsstatres *);
+-int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_fsinfores *);
+-int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_pathconfres *);
+-int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_commitres *);
++int nfs3svc_decode_fhandle(void *, __be32 *, void *);
++int nfs3svc_decode_sattrargs(void *, __be32 *, void *);
++int nfs3svc_decode_diropargs(void *, __be32 *, void *);
++int nfs3svc_decode_accessargs(void *, __be32 *, void *);
++int nfs3svc_decode_readargs(void *, __be32 *, void *);
++int nfs3svc_decode_writeargs(void *, __be32 *, void *);
++int nfs3svc_decode_createargs(void *, __be32 *, void *);
++int nfs3svc_decode_mkdirargs(void *, __be32 *, void *);
++int nfs3svc_decode_mknodargs(void *, __be32 *, void *);
++int nfs3svc_decode_renameargs(void *, __be32 *, void *);
++int nfs3svc_decode_readlinkargs(void *, __be32 *, void *);
++int nfs3svc_decode_linkargs(void *, __be32 *, void *);
++int nfs3svc_decode_symlinkargs(void *, __be32 *, void *);
++int nfs3svc_decode_readdirargs(void *, __be32 *, void *);
++int nfs3svc_decode_readdirplusargs(void *, __be32 *, void *);
++int nfs3svc_decode_commitargs(void *, __be32 *, void *);
++int nfs3svc_encode_voidres(void *, __be32 *, void *);
++int nfs3svc_encode_attrstat(void *, __be32 *, void *);
++int nfs3svc_encode_wccstat(void *, __be32 *, void *);
++int nfs3svc_encode_diropres(void *, __be32 *, void *);
++int nfs3svc_encode_accessres(void *, __be32 *, void *);
++int nfs3svc_encode_readlinkres(void *, __be32 *, void *);
++int nfs3svc_encode_readres(void *, __be32 *, void *);
++int nfs3svc_encode_writeres(void *, __be32 *, void *);
++int nfs3svc_encode_createres(void *, __be32 *, void *);
++int nfs3svc_encode_renameres(void *, __be32 *, void *);
++int nfs3svc_encode_linkres(void *, __be32 *, void *);
++int nfs3svc_encode_readdirres(void *, __be32 *, void *);
++int nfs3svc_encode_fsstatres(void *, __be32 *, void *);
++int nfs3svc_encode_fsinfores(void *, __be32 *, void *);
++int nfs3svc_encode_pathconfres(void *, __be32 *, void *);
++int nfs3svc_encode_commitres(void *, __be32 *, void *);
+-int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_attrstat *);
+-int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *,
+-                              struct nfsd3_fhandle_pair *);
++int nfs3svc_release_fhandle(void *, __be32 *, void *);
++int nfs3svc_release_fhandle2(void *, __be32 *, void *);
+ int nfs3svc_encode_entry(void *, const char *name,
+                               int namlen, loff_t offset, u64 ino,
+                               unsigned int);
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index beea0c5..7f3699d 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -659,11 +659,9 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
+ bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
+-int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
+-int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
+-              struct nfsd4_compoundargs *);
+-int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
+-              struct nfsd4_compoundres *);
++int nfs4svc_encode_voidres(void *, __be32 *, void *);
++int nfs4svc_decode_compoundargs(void *, __be32 *, void *);
++int nfs4svc_encode_compoundres(void *, __be32 *, void *);
+ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
+ void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
+ void nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op);
+@@ -673,26 +671,26 @@ __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
+               u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
+ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_setclientid *setclid);
++              void *setclid);
+ extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_setclientid_confirm *setclientid_confirm);
++              void *setclientid_confirm);
+ extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
+-              struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+-extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_backchannel_ctl *);
+-extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *);
++              struct nfsd4_compound_state *, void *);
++extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *, struct nfsd4_compound_state *, void *);
++extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, void *);
+ extern __be32 nfsd4_create_session(struct svc_rqst *,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_create_session *);
++              void *);
+ extern __be32 nfsd4_sequence(struct svc_rqst *,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_sequence *);
++              void *);
+ extern void nfsd4_sequence_done(struct nfsd4_compoundres *resp);
+ extern __be32 nfsd4_destroy_session(struct svc_rqst *,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_destroy_session *);
+-extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_destroy_clientid *);
+-__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_reclaim_complete *);
++              void *);
++extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *, void *);
++__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, void *);
+ extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
+               struct nfsd4_open *open, struct nfsd_net *nn);
+ extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
+@@ -701,34 +699,34 @@ extern void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate);
+ extern void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
+               struct nfsd4_open *open);
+ extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
+-              struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
++              struct nfsd4_compound_state *, void *oc);
+ extern __be32 nfsd4_close(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_close *close);
++              void *close);
+ extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_open_downgrade *od);
++              void *od);
+ extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
+-              struct nfsd4_lock *lock);
++              void *lock);
+ extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_lockt *lockt);
++              void *lockt);
+ extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_locku *locku);
++              void *locku);
+ extern __be32
+ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+               struct nfsd4_compound_state *,
+-              struct nfsd4_release_lockowner *rlockowner);
++              void *rlockowner);
+ extern int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp);
+ extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
+-              struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
++              struct nfsd4_compound_state *, void *dr);
+ extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
+-                        struct nfsd4_compound_state *, clientid_t *clid);
++                        struct nfsd4_compound_state *, void *clid);
+ extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp,
+-              struct nfsd4_compound_state *, struct nfsd4_test_stateid *test_stateid);
++              struct nfsd4_compound_state *, void *test_stateid);
+ extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp,
+-              struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid);
++              struct nfsd4_compound_state *, void *free_stateid);
+ extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr);
+ #endif
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index 52ccd34..a166501 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
+ int __register_nls(struct nls_table *nls, struct module *owner)
+ {
+-      struct nls_table ** tmp = &tables;
++      struct nls_table *tmp = tables;
+       if (nls->next)
+               return -EBUSY;
+-      nls->owner = owner;
++      pax_open_kernel();
++      const_cast(nls->owner) = owner;
++      pax_close_kernel();
+       spin_lock(&nls_lock);
+-      while (*tmp) {
+-              if (nls == *tmp) {
++      while (tmp) {
++              if (nls == tmp) {
+                       spin_unlock(&nls_lock);
+                       return -EBUSY;
+               }
+-              tmp = &(*tmp)->next;
++              tmp = tmp->next;
+       }
+-      nls->next = tables;
++      pax_open_kernel();
++      const_cast(nls->next) = tables;
++      pax_close_kernel();
+       tables = nls;
+       spin_unlock(&nls_lock);
+       return 0;       
+@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
+ int unregister_nls(struct nls_table * nls)
+ {
+-      struct nls_table ** tmp = &tables;
++      struct nls_table * const * tmp = &tables;
+       spin_lock(&nls_lock);
+       while (*tmp) {
+               if (nls == *tmp) {
+-                      *tmp = nls->next;
++                      pax_open_kernel();
++                      *(struct nls_table **)tmp = nls->next;
++                      pax_close_kernel();
+                       spin_unlock(&nls_lock);
+                       return 0;
+               }
+@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
+       return -EINVAL;
+ }
+-static struct nls_table *find_nls(char *charset)
++static struct nls_table *find_nls(const char *charset)
+ {
+       struct nls_table *nls;
+       spin_lock(&nls_lock);
+@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
+       return nls;
+ }
+-struct nls_table *load_nls(char *charset)
++struct nls_table *load_nls(const char *charset)
+ {
+       return try_then_request_module(find_nls(charset), "nls_%s", charset);
+ }
+diff --git a/fs/nls/nls_cp932.c b/fs/nls/nls_cp932.c
+index 67b7398..38622e8 100644
+--- a/fs/nls/nls_cp932.c
++++ b/fs/nls/nls_cp932.c
+@@ -7834,7 +7834,7 @@ static const unsigned char charset2upper[256] = {
+       0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+ };
+-static int uni2char(const wchar_t uni,
++static int uni2char(wchar_t uni,
+                   unsigned char *out, int boundlen)
+ {
+       const unsigned char *uni2charset;
+diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c
+index c96546c..d5dfe94 100644
+--- a/fs/nls/nls_cp936.c
++++ b/fs/nls/nls_cp936.c
+@@ -10997,7 +10997,7 @@ static const unsigned char charset2upper[256] = {
+       0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+ };
+-static int uni2char(const wchar_t uni,
++static int uni2char(wchar_t uni,
+                       unsigned char *out, int boundlen)
+ {
+       const unsigned char *uni2charset;
+diff --git a/fs/nls/nls_cp949.c b/fs/nls/nls_cp949.c
+index 199171e..709af9a 100644
+--- a/fs/nls/nls_cp949.c
++++ b/fs/nls/nls_cp949.c
+@@ -13858,7 +13858,7 @@ static const unsigned char charset2upper[256] = {
+       0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+ };
+-static int uni2char(const wchar_t uni,
++static int uni2char(wchar_t uni,
+                       unsigned char *out, int boundlen)
+ {
+       const unsigned char *uni2charset;
+diff --git a/fs/nls/nls_cp950.c b/fs/nls/nls_cp950.c
+index 8e14187..d9cec2f 100644
+--- a/fs/nls/nls_cp950.c
++++ b/fs/nls/nls_cp950.c
+@@ -9394,7 +9394,7 @@ static const unsigned char charset2upper[256] = {
+       0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
+ };
+-static int uni2char(const wchar_t uni,
++static int uni2char(wchar_t uni,
+                       unsigned char *out, int boundlen)
+ {
+       const unsigned char *uni2charset;
+diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
+index 162b3f1..b9121f8 100644
+--- a/fs/nls/nls_euc-jp.c
++++ b/fs/nls/nls_euc-jp.c
+@@ -406,7 +406,7 @@ static inline int sjisnec2sjisibm(unsigned char *sjisibm,
+       return 2;
+ }
+-static int uni2char(const wchar_t uni,
++static int uni2char(wchar_t uni,
+                   unsigned char *out, int boundlen)
+ {
+       int n;
+@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
+       p_nls = load_nls("cp932");
+       if (p_nls) {
+-              table.charset2upper = p_nls->charset2upper;
+-              table.charset2lower = p_nls->charset2lower;
++              pax_open_kernel();
++              const_cast(table.charset2upper) = p_nls->charset2upper;
++              const_cast(table.charset2lower) = p_nls->charset2lower;
++              pax_close_kernel();
+               return register_nls(&table);
+       }
+diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
+index a80a741..f28c9c9 100644
+--- a/fs/nls/nls_koi8-ru.c
++++ b/fs/nls/nls_koi8-ru.c
+@@ -13,7 +13,7 @@
+ static struct nls_table *p_nls;
+-static int uni2char(const wchar_t uni,
++static int uni2char(wchar_t uni,
+                   unsigned char *out, int boundlen)
+ {
+       if (boundlen <= 0)
+@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
+       p_nls = load_nls("koi8-u");
+       if (p_nls) {
+-              table.charset2upper = p_nls->charset2upper;
+-              table.charset2lower = p_nls->charset2lower;
++              pax_open_kernel();
++              const_cast(table.charset2upper) = p_nls->charset2upper;
++              const_cast(table.charset2lower) = p_nls->charset2lower;
++              pax_close_kernel();
+               return register_nls(&table);
+       }
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index a643138..4b88993 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+       fd = fanotify_event_metadata.fd;
+       ret = -EFAULT;
+-      if (copy_to_user(buf, &fanotify_event_metadata,
+-                       fanotify_event_metadata.event_len))
++      if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
++          copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
+               goto out_close_fd;
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+diff --git a/fs/notify/notification.c b/fs/notify/notification.c
+index e455e83..6e2b732 100644
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -48,7 +48,7 @@
+ #include <linux/fsnotify_backend.h>
+ #include "fsnotify.h"
+-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+ /**
+  * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
+@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+  */
+ u32 fsnotify_get_cookie(void)
+ {
+-      return atomic_inc_return(&fsnotify_sync_cookie);
++      return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
+ }
+ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
+diff --git a/fs/ntfs/debug.h b/fs/ntfs/debug.h
+index 61bf091..6ac5619 100644
+--- a/fs/ntfs/debug.h
++++ b/fs/ntfs/debug.h
+@@ -30,7 +30,7 @@
+ extern int debug_msgs;
+-extern __printf(4, 5)
++extern __printf(4, 5) __nocapture(3)
+ void __ntfs_debug(const char *file, int line, const char *function,
+                 const char *format, ...);
+ /**
+@@ -58,12 +58,12 @@ do {                                                                       \
+ #endif        /* !DEBUG */
+-extern  __printf(3, 4)
++extern  __printf(3, 4) __nocapture(1)
+ void __ntfs_warning(const char *function, const struct super_block *sb,
+                   const char *fmt, ...);
+ #define ntfs_warning(sb, f, a...)     __ntfs_warning(__func__, sb, f, ##a)
+-extern  __printf(3, 4)
++extern  __printf(3, 4) __nocapture(1)
+ void __ntfs_error(const char *function, const struct super_block *sb,
+                 const char *fmt, ...);
+ #define ntfs_error(sb, f, a...)               __ntfs_error(__func__, sb, f, ##a)
+diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
+index a186135..31eb358 100644
+--- a/fs/ntfs/dir.c
++++ b/fs/ntfs/dir.c
+@@ -1310,7 +1310,7 @@ find_next_index_buffer:
+       ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
+                                         ~(s64)(ndir->itype.index.block_size - 1)));
+       /* Bounds checks. */
+-      if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
++      if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
+               ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+                               "inode 0x%lx or driver bug.", vdir->i_ino);
+               goto err_out;
+@@ -1517,7 +1517,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
+       na.type = AT_BITMAP;
+       na.name = I30;
+       na.name_len = 4;
+-      bmp_vi = ilookup5(vi->i_sb, vi->i_ino, (test_t)ntfs_test_inode, &na);
++      bmp_vi = ilookup5(vi->i_sb, vi->i_ino, ntfs_test_inode, &na);
+       if (bmp_vi) {
+               write_inode_now(bmp_vi, !datasync);
+               iput(bmp_vi);
+diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
+index e01287c..9939db2 100644
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -57,8 +57,9 @@
+  * NOTE: This function runs with the inode_hash_lock spin lock held so it is not
+  * allowed to sleep.
+  */
+-int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
++int ntfs_test_inode(struct inode *vi, void *_na)
+ {
++      ntfs_attr *na = _na;
+       ntfs_inode *ni;
+       if (vi->i_ino != na->mft_no)
+@@ -101,8 +102,9 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
+  * NOTE: This function runs with the inode->i_lock spin lock held so it is not
+  * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
+  */
+-static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
++static int ntfs_init_locked_inode(struct inode *vi, void *_na)
+ {
++      ntfs_attr *na = _na;
+       ntfs_inode *ni = NTFS_I(vi);
+       vi->i_ino = na->mft_no;
+@@ -145,7 +147,6 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
+       return 0;
+ }
+-typedef int (*set_t)(struct inode *, void *);
+ static int ntfs_read_locked_inode(struct inode *vi);
+ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi);
+ static int ntfs_read_locked_index_inode(struct inode *base_vi,
+@@ -178,8 +179,8 @@ struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no)
+       na.name = NULL;
+       na.name_len = 0;
+-      vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode,
+-                      (set_t)ntfs_init_locked_inode, &na);
++      vi = iget5_locked(sb, mft_no, ntfs_test_inode,
++                      ntfs_init_locked_inode, &na);
+       if (unlikely(!vi))
+               return ERR_PTR(-ENOMEM);
+@@ -239,8 +240,8 @@ struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type,
+       na.name = name;
+       na.name_len = name_len;
+-      vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode,
+-                      (set_t)ntfs_init_locked_inode, &na);
++      vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode,
++                      ntfs_init_locked_inode, &na);
+       if (unlikely(!vi))
+               return ERR_PTR(-ENOMEM);
+@@ -294,8 +295,8 @@ struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name,
+       na.name = name;
+       na.name_len = name_len;
+-      vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode,
+-                      (set_t)ntfs_init_locked_inode, &na);
++      vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode,
++                      ntfs_init_locked_inode, &na);
+       if (unlikely(!vi))
+               return ERR_PTR(-ENOMEM);
+diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
+index b3c3469..17208ad 100644
+--- a/fs/ntfs/inode.h
++++ b/fs/ntfs/inode.h
+@@ -267,9 +267,7 @@ typedef struct {
+       ATTR_TYPE type;
+ } ntfs_attr;
+-typedef int (*test_t)(struct inode *, void *);
+-
+-extern int ntfs_test_inode(struct inode *vi, ntfs_attr *na);
++extern int ntfs_test_inode(struct inode *vi, void *_na);
+ extern struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no);
+ extern struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type,
+diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
+index d15d492..c75f95ad 100644
+--- a/fs/ntfs/mft.c
++++ b/fs/ntfs/mft.c
+@@ -963,7 +963,7 @@ bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
+                * dirty code path of the inode dirty code path when writing
+                * $MFT occurs.
+                */
+-              vi = ilookup5_nowait(sb, mft_no, (test_t)ntfs_test_inode, &na);
++              vi = ilookup5_nowait(sb, mft_no, ntfs_test_inode, &na);
+       }
+       if (vi) {
+               ntfs_debug("Base inode 0x%lx is in icache.", mft_no);
+@@ -1024,7 +1024,7 @@ bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
+               vi = igrab(mft_vi);
+               BUG_ON(vi != mft_vi);
+       } else
+-              vi = ilookup5_nowait(sb, na.mft_no, (test_t)ntfs_test_inode,
++              vi = ilookup5_nowait(sb, na.mft_no, ntfs_test_inode,
+                               &na);
+       if (!vi) {
+               /*
+diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
+index ecb4987..c723ded 100644
+--- a/fs/ntfs/super.c
++++ b/fs/ntfs/super.c
+@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+               if (!silent)
+                       ntfs_error(sb, "Primary boot sector is invalid.");
+       } else if (!silent)
+-              ntfs_error(sb, read_err_str, "primary");
++              ntfs_error(sb, read_err_str, "%s", "primary");
+       if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
+               if (bh_primary)
+                       brelse(bh_primary);
+@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+                       goto hotfix_primary_boot_sector;
+               brelse(bh_backup);
+       } else if (!silent)
+-              ntfs_error(sb, read_err_str, "backup");
++              ntfs_error(sb, read_err_str, "%s", "backup");
+       /* Try to read NT3.51- backup boot sector. */
+       if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
+               if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
+@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+                                       "sector.");
+               brelse(bh_backup);
+       } else if (!silent)
+-              ntfs_error(sb, read_err_str, "backup");
++              ntfs_error(sb, read_err_str, "%s", "backup");
+       /* We failed. Cleanup and return. */
+       if (bh_primary)
+               brelse(bh_primary);
+@@ -2711,7 +2711,7 @@ static const struct super_operations ntfs_sops = {
+  *
+  * NOTE: @sb->s_flags contains the mount options flags.
+  */
+-static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
++static int ntfs_fill_super(struct super_block *sb, void *opt, int silent)
+ {
+       ntfs_volume *vol;
+       struct buffer_head *bh;
+diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
+index 308ea0e..3c16da6 100644
+--- a/fs/ocfs2/cluster/masklog.h
++++ b/fs/ocfs2/cluster/masklog.h
+@@ -162,7 +162,7 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
+ #endif
+-__printf(4, 5)
++__printf(4, 5) __nocapture(2)
+ void __mlog_printk(const u64 *m, const char *func, int line,
+                  const char *fmt, ...);
+diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
+index e9f3705..8e53eb1 100644
+--- a/fs/ocfs2/dlm/dlmcommon.h
++++ b/fs/ocfs2/dlm/dlmcommon.h
+@@ -151,9 +151,9 @@ struct dlm_ctxt
+       struct list_head mle_hb_events;
+       /* these give a really vague idea of the system load */
+-      atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
++      atomic_unchecked_t mle_tot_count[DLM_MLE_NUM_TYPES];
+       atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
+-      atomic_t res_tot_count;
++      atomic_unchecked_t res_tot_count;
+       atomic_t res_cur_count;
+       struct dlm_debug_ctxt *dlm_debug_ctxt;
+diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
+index e7b760d..f8cd0ad 100644
+--- a/fs/ocfs2/dlm/dlmdebug.c
++++ b/fs/ocfs2/dlm/dlmdebug.c
+@@ -735,10 +735,10 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
+       out += snprintf(buf + out, len - out,
+                       "Lock Resources: %d (%d)\n",
+                       atomic_read(&dlm->res_cur_count),
+-                      atomic_read(&dlm->res_tot_count));
++                      atomic_read_unchecked(&dlm->res_tot_count));
+       for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
+-              tot_mles += atomic_read(&dlm->mle_tot_count[i]);
++              tot_mles += atomic_read_unchecked(&dlm->mle_tot_count[i]);
+       for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
+               cur_mles += atomic_read(&dlm->mle_cur_count[i]);
+@@ -751,19 +751,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
+       out += snprintf(buf + out, len - out,
+                       "  Blocking: %d (%d)\n",
+                       atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
+-                      atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
++                      atomic_read_unchecked(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
+       /*  Mastery: xxx (xxx) */
+       out += snprintf(buf + out, len - out,
+                       "  Mastery: %d (%d)\n",
+                       atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
+-                      atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
++                      atomic_read_unchecked(&dlm->mle_tot_count[DLM_MLE_MASTER]));
+       /*  Migration: xxx (xxx) */
+       out += snprintf(buf + out, len - out,
+                       "  Migration: %d (%d)\n",
+                       atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
+-                      atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
++                      atomic_read_unchecked(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
+       /* Lists: Dirty=Empty  Purge=InUse  PendingASTs=Empty  ... */
+       out += snprintf(buf + out, len - out,
+diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
+index 533bd52..3a9d64a 100644
+--- a/fs/ocfs2/dlm/dlmdomain.c
++++ b/fs/ocfs2/dlm/dlmdomain.c
+@@ -2055,10 +2055,10 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
+       dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+       dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+-      atomic_set(&dlm->res_tot_count, 0);
++      atomic_set_unchecked(&dlm->res_tot_count, 0);
+       atomic_set(&dlm->res_cur_count, 0);
+       for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
+-              atomic_set(&dlm->mle_tot_count[i], 0);
++              atomic_set_unchecked(&dlm->mle_tot_count[i], 0);
+               atomic_set(&dlm->mle_cur_count[i], 0);
+       }
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 6ea06f8..6789716 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -303,7 +303,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
+               mle->mnamehash = dlm_lockid_hash(name, namelen);
+       }
+-      atomic_inc(&dlm->mle_tot_count[mle->type]);
++      atomic_inc_unchecked(&dlm->mle_tot_count[mle->type]);
+       atomic_inc(&dlm->mle_cur_count[mle->type]);
+       /* copy off the node_map and register hb callbacks on our copy */
+@@ -577,7 +577,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
+       kref_init(&res->refs);
+-      atomic_inc(&dlm->res_tot_count);
++      atomic_inc_unchecked(&dlm->res_tot_count);
+       atomic_inc(&dlm->res_cur_count);
+       /* just for consistency */
+diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
+index ef474cd..a5cc6a6 100644
+--- a/fs/ocfs2/dlmfs/dlmfs.c
++++ b/fs/ocfs2/dlmfs/dlmfs.c
+@@ -88,13 +88,13 @@ struct workqueue_struct *user_dlm_worker;
+  */
+ #define DLMFS_CAPABILITIES "bast stackglue"
+ static int param_set_dlmfs_capabilities(const char *val,
+-                                      struct kernel_param *kp)
++                                      const struct kernel_param *kp)
+ {
+       printk(KERN_ERR "%s: readonly parameter\n", kp->name);
+       return -EINVAL;
+ }
+ static int param_get_dlmfs_capabilities(char *buffer,
+-                                      struct kernel_param *kp)
++                                      const struct kernel_param *kp)
+ {
+       return strlcpy(buffer, DLMFS_CAPABILITIES,
+                      strlen(DLMFS_CAPABILITIES) + 1);
+diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
+index 2cabbcf..93edf33 100644
+--- a/fs/ocfs2/filecheck.c
++++ b/fs/ocfs2/filecheck.c
+@@ -217,7 +217,7 @@ int ocfs2_filecheck_create_sysfs(struct super_block *sb)
+       struct ocfs2_filecheck *fcheck = NULL;
+       struct ocfs2_filecheck_sysfs_entry *entry = NULL;
+       struct attribute **attrs = NULL;
+-      struct attribute_group attrgp;
++      attribute_group_no_const attrgp;
+       if (!ocfs2_kset)
+               return -ENOMEM;
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index fe0d1f9..7ec8659 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -1317,7 +1317,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
+               goto bail;
+       }
+-      atomic_inc(&osb->alloc_stats.moves);
++      atomic_inc_unchecked(&osb->alloc_stats.moves);
+ bail:
+       if (handle)
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index e63af7d..2a8a83a 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -247,11 +247,11 @@ enum ocfs2_vol_state
+ struct ocfs2_alloc_stats
+ {
+-      atomic_t moves;
+-      atomic_t local_data;
+-      atomic_t bitmap_data;
+-      atomic_t bg_allocs;
+-      atomic_t bg_extends;
++      atomic_unchecked_t moves;
++      atomic_unchecked_t local_data;
++      atomic_unchecked_t bitmap_data;
++      atomic_unchecked_t bg_allocs;
++      atomic_unchecked_t bg_extends;
+ };
+ enum ocfs2_local_alloc_state
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index 6ad3533..053f29d 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -851,7 +851,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
+                               mlog_errno(status);
+                       goto bail;
+               }
+-              atomic_inc(&osb->alloc_stats.bg_extends);
++              atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
+               /* You should never ask for this much metadata */
+               BUG_ON(bits_wanted >
+@@ -2026,7 +2026,7 @@ int ocfs2_claim_metadata(handle_t *handle,
+               mlog_errno(status);
+               goto bail;
+       }
+-      atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++      atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       *suballoc_loc = res.sr_bg_blkno;
+       *suballoc_bit_start = res.sr_bit_offset;
+@@ -2192,7 +2192,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+       trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+                                          res->sr_bits);
+-      atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++      atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       BUG_ON(res->sr_bits != 1);
+@@ -2234,7 +2234,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
+               mlog_errno(status);
+               goto bail;
+       }
+-      atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++      atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       BUG_ON(res.sr_bits != 1);
+@@ -2338,7 +2338,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+                                                     cluster_start,
+                                                     num_clusters);
+               if (!status)
+-                      atomic_inc(&osb->alloc_stats.local_data);
++                      atomic_inc_unchecked(&osb->alloc_stats.local_data);
+       } else {
+               if (min_clusters > (osb->bitmap_cpg - 1)) {
+                       /* The only paths asking for contiguousness
+@@ -2364,7 +2364,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+                               ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+                                                                res.sr_bg_blkno,
+                                                                res.sr_bit_offset);
+-                      atomic_inc(&osb->alloc_stats.bitmap_data);
++                      atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
+                       *num_clusters = res.sr_bits;
+               }
+       }
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 603b28d..a9818bd0 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
+                       "%10s => GlobalAllocs: %d  LocalAllocs: %d  "
+                       "SubAllocs: %d  LAWinMoves: %d  SAExtends: %d\n",
+                       "Stats",
+-                      atomic_read(&osb->alloc_stats.bitmap_data),
+-                      atomic_read(&osb->alloc_stats.local_data),
+-                      atomic_read(&osb->alloc_stats.bg_allocs),
+-                      atomic_read(&osb->alloc_stats.moves),
+-                      atomic_read(&osb->alloc_stats.bg_extends));
++                      atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
++                      atomic_read_unchecked(&osb->alloc_stats.local_data),
++                      atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
++                      atomic_read_unchecked(&osb->alloc_stats.moves),
++                      atomic_read_unchecked(&osb->alloc_stats.bg_extends));
+       out += snprintf(buf + out, len - out,
+                       "%10s => State: %u  Descriptor: %llu  Size: %u bits  "
+@@ -2087,11 +2087,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
+       mutex_init(&osb->system_file_mutex);
+-      atomic_set(&osb->alloc_stats.moves, 0);
+-      atomic_set(&osb->alloc_stats.local_data, 0);
+-      atomic_set(&osb->alloc_stats.bitmap_data, 0);
+-      atomic_set(&osb->alloc_stats.bg_allocs, 0);
+-      atomic_set(&osb->alloc_stats.bg_extends, 0);
++      atomic_set_unchecked(&osb->alloc_stats.moves, 0);
++      atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
++      atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
++      atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
++      atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
+       /* Copy the blockcheck stats from the superblock probe */
+       osb->osb_ecc_stats = *stats;
+diff --git a/fs/open.c b/fs/open.c
+index 4fd6e25..fff35d4 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -32,6 +32,8 @@
+ #include <linux/dnotify.h>
+ #include <linux/compat.h>
++#define CREATE_TRACE_POINTS
++#include <trace/events/fs.h>
+ #include "internal.h"
+ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+@@ -105,6 +107,8 @@ long vfs_truncate(const struct path *path, loff_t length)
+       error = locks_verify_truncate(inode, NULL, length);
+       if (!error)
+               error = security_path_truncate(path);
++      if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
++              error = -EACCES;
+       if (!error)
+               error = do_truncate(path->dentry, length, 0, NULL);
+@@ -189,6 +193,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+       error = locks_verify_truncate(inode, f.file, length);
+       if (!error)
+               error = security_path_truncate(&f.file->f_path);
++      if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
++              error = -EACCES;
+       if (!error)
+               error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
+       sb_end_write(inode->i_sb);
+@@ -398,6 +404,9 @@ retry:
+       if (__mnt_is_readonly(path.mnt))
+               res = -EROFS;
++      if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
++              res = -EACCES;
++
+ out_path_release:
+       path_put(&path);
+       if (retry_estale(res, lookup_flags)) {
+@@ -429,6 +438,8 @@ retry:
+       if (error)
+               goto dput_and_out;
++      gr_log_chdir(path.dentry, path.mnt);
++
+       set_fs_pwd(current->fs, &path);
+ dput_and_out:
+@@ -458,6 +469,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+               goto out_putf;
+       error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
++
++      if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
++              error = -EPERM;
++
++      if (!error)
++              gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
++
+       if (!error)
+               set_fs_pwd(current->fs, &f.file->f_path);
+ out_putf:
+@@ -487,7 +505,13 @@ retry:
+       if (error)
+               goto dput_and_out;
++      if (gr_handle_chroot_chroot(path.dentry, path.mnt))
++              goto dput_and_out;
++
+       set_fs_root(current->fs, &path);
++
++      gr_handle_chroot_chdir(&path);
++
+       error = 0;
+ dput_and_out:
+       path_put(&path);
+@@ -511,6 +535,16 @@ static int chmod_common(const struct path *path, umode_t mode)
+               return error;
+ retry_deleg:
+       inode_lock(inode);
++
++      if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
++              error = -EACCES;
++              goto out_unlock;
++      }
++      if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
++              error = -EACCES;
++              goto out_unlock;
++      }
++
+       error = security_path_chmod(path, mode);
+       if (error)
+               goto out_unlock;
+@@ -576,6 +610,9 @@ static int chown_common(const struct path *path, uid_t user, gid_t group)
+       uid = make_kuid(current_user_ns(), user);
+       gid = make_kgid(current_user_ns(), group);
++      if (!gr_acl_handle_chown(path->dentry, path->mnt))
++              return -EACCES;
++
+ retry_deleg:
+       newattrs.ia_valid =  ATTR_CTIME;
+       if (user != (uid_t) -1) {
+@@ -1040,6 +1077,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+               } else {
+                       fsnotify_open(f);
+                       fd_install(fd, f);
++                      trace_do_sys_open(tmp->name, flags, mode);
+               }
+       }
+       putname(tmp);
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index b9da9a0..e146758 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -539,10 +539,12 @@ void orangefs_kill_sb(struct super_block *sb)
+ int orangefs_inode_cache_initialize(void)
+ {
+-      orangefs_inode_cache = kmem_cache_create("orangefs_inode_cache",
++      orangefs_inode_cache = kmem_cache_create_usercopy("orangefs_inode_cache",
+                                             sizeof(struct orangefs_inode_s),
+                                             0,
+                                             ORANGEFS_CACHE_CREATE_FLAGS,
++                                            offsetof(struct orangefs_inode_s, link_target),
++                                            sizeof(((struct orangefs_inode_s *)0)->link_target),
+                                             orangefs_inode_cache_ctor);
+       if (!orangefs_inode_cache) {
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index abadbc30..a67f44c 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -197,7 +197,7 @@ static char *ovl_read_symlink(struct dentry *realdentry)
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+       res = inode->i_op->readlink(realdentry,
+-                                  (char __user *)buf, PAGE_SIZE - 1);
++                                  (char __force_user *)buf, PAGE_SIZE - 1);
+       set_fs(old_fs);
+       if (res < 0) {
+               free_page((unsigned long) buf);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index e2a94a2..f2ac233 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -148,8 +148,8 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
+ static void ovl_inode_init(struct inode *inode, struct inode *realinode,
+                          bool is_upper)
+ {
+-      WRITE_ONCE(inode->i_private, (unsigned long) realinode |
+-                 (is_upper ? OVL_ISUPPER_MASK : 0));
++      WRITE_ONCE(inode->i_private, (void *)((unsigned long) realinode |
++                 (is_upper ? OVL_ISUPPER_MASK : 0)));
+ }
+ struct vfsmount *ovl_entry_mnt_real(struct ovl_entry *oe, struct inode *inode,
+@@ -182,7 +182,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
+ {
+       struct ovl_entry *oe = dentry->d_fsdata;
+-      *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
++      *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
+ }
+ int ovl_want_write(struct dentry *dentry)
+@@ -234,7 +234,7 @@ void ovl_inode_update(struct inode *inode, struct inode *upperinode)
+       WARN_ON(!upperinode);
+       WARN_ON(!inode_unhashed(inode));
+       WRITE_ONCE(inode->i_private,
+-                 (unsigned long) upperinode | OVL_ISUPPER_MASK);
++                 (void *)((unsigned long) upperinode | OVL_ISUPPER_MASK));
+       if (!S_ISDIR(upperinode->i_mode))
+               __insert_inode_hash(inode, (unsigned long) upperinode);
+ }
+@@ -1107,8 +1107,8 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
+ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ {
+-      struct path upperpath = { NULL, NULL };
+-      struct path workpath = { NULL, NULL };
++      struct path upperpath = { .dentry = NULL, .mnt = NULL };
++      struct path workpath = { .dentry = NULL, .mnt = NULL };
+       struct dentry *root_dentry;
+       struct inode *realinode;
+       struct ovl_entry *oe;
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 4ebe6b2..b3752f2 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
+ /*
+  * Minimum pipe size, as required by POSIX
+  */
+-unsigned int pipe_min_size = PAGE_SIZE;
++unsigned int pipe_min_size __read_only = PAGE_SIZE;
+ /* Maximum allocatable pages per user. Hard limit is unset by default, soft
+  * matches default values.
+@@ -62,7 +62,7 @@ unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
+ static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
+ {
+-      if (pipe->files)
++      if (atomic_read(&pipe->files))
+               mutex_lock_nested(&pipe->mutex, subclass);
+ }
+@@ -77,7 +77,7 @@ EXPORT_SYMBOL(pipe_lock);
+ void pipe_unlock(struct pipe_inode_info *pipe)
+ {
+-      if (pipe->files)
++      if (atomic_read(&pipe->files))
+               mutex_unlock(&pipe->mutex);
+ }
+ EXPORT_SYMBOL(pipe_unlock);
+@@ -312,9 +312,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
+               }
+               if (bufs)       /* More to do? */
+                       continue;
+-              if (!pipe->writers)
++              if (!atomic_read(&pipe->writers))
+                       break;
+-              if (!pipe->waiting_writers) {
++              if (!atomic_read(&pipe->waiting_writers)) {
+                       /* syscall merging: Usually we must not sleep
+                        * if O_NONBLOCK is set, or if we got some data.
+                        * But if a writer sleeps in kernel space, then
+@@ -371,7 +371,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+       __pipe_lock(pipe);
+-      if (!pipe->readers) {
++      if (!atomic_read(&pipe->readers)) {
+               send_sig(SIGPIPE, current, 0);
+               ret = -EPIPE;
+               goto out;
+@@ -406,7 +406,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+       for (;;) {
+               int bufs;
+-              if (!pipe->readers) {
++              if (!atomic_read(&pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+@@ -474,9 +474,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+                       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+                       do_wakeup = 0;
+               }
+-              pipe->waiting_writers++;
++              atomic_inc(&pipe->waiting_writers);
+               pipe_wait(pipe);
+-              pipe->waiting_writers--;
++              atomic_dec(&pipe->waiting_writers);
+       }
+ out:
+       __pipe_unlock(pipe);
+@@ -531,7 +531,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+       mask = 0;
+       if (filp->f_mode & FMODE_READ) {
+               mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+-              if (!pipe->writers && filp->f_version != pipe->w_counter)
++              if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
+                       mask |= POLLHUP;
+       }
+@@ -541,7 +541,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+                * Most Unices do not set POLLERR for FIFOs but on Linux they
+                * behave exactly like pipes for poll().
+                */
+-              if (!pipe->readers)
++              if (!atomic_read(&pipe->readers))
+                       mask |= POLLERR;
+       }
+@@ -553,7 +553,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
+       int kill = 0;
+       spin_lock(&inode->i_lock);
+-      if (!--pipe->files) {
++      if (atomic_dec_and_test(&pipe->files)) {
+               inode->i_pipe = NULL;
+               kill = 1;
+       }
+@@ -570,11 +570,11 @@ pipe_release(struct inode *inode, struct file *file)
+       __pipe_lock(pipe);
+       if (file->f_mode & FMODE_READ)
+-              pipe->readers--;
++              atomic_dec(&pipe->readers);
+       if (file->f_mode & FMODE_WRITE)
+-              pipe->writers--;
++              atomic_dec(&pipe->writers);
+-      if (pipe->readers || pipe->writers) {
++      if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
+               wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+@@ -672,7 +672,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
+       kfree(pipe);
+ }
+-static struct vfsmount *pipe_mnt __read_mostly;
++struct vfsmount *pipe_mnt __read_mostly;
+ /*
+  * pipefs_dname() is called from d_path().
+@@ -702,8 +702,9 @@ static struct inode * get_pipe_inode(void)
+               goto fail_iput;
+       inode->i_pipe = pipe;
+-      pipe->files = 2;
+-      pipe->readers = pipe->writers = 1;
++      atomic_set(&pipe->files, 2);
++      atomic_set(&pipe->readers, 1);
++      atomic_set(&pipe->writers, 1);
+       inode->i_fop = &pipefifo_fops;
+       /*
+@@ -885,17 +886,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
+       spin_lock(&inode->i_lock);
+       if (inode->i_pipe) {
+               pipe = inode->i_pipe;
+-              pipe->files++;
++              atomic_inc(&pipe->files);
+               spin_unlock(&inode->i_lock);
+       } else {
+               spin_unlock(&inode->i_lock);
+               pipe = alloc_pipe_info();
+               if (!pipe)
+                       return -ENOMEM;
+-              pipe->files = 1;
++              atomic_set(&pipe->files, 1);
+               spin_lock(&inode->i_lock);
+               if (unlikely(inode->i_pipe)) {
+-                      inode->i_pipe->files++;
++                      atomic_inc(&inode->i_pipe->files);
+                       spin_unlock(&inode->i_lock);
+                       free_pipe_info(pipe);
+                       pipe = inode->i_pipe;
+@@ -920,10 +921,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
+        *  opened, even when there is no process writing the FIFO.
+        */
+               pipe->r_counter++;
+-              if (pipe->readers++ == 0)
++              if (atomic_inc_return(&pipe->readers) == 1)
+                       wake_up_partner(pipe);
+-              if (!is_pipe && !pipe->writers) {
++              if (!is_pipe && !atomic_read(&pipe->writers)) {
+                       if ((filp->f_flags & O_NONBLOCK)) {
+                               /* suppress POLLHUP until we have
+                                * seen a writer */
+@@ -942,14 +943,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
+        *  errno=ENXIO when there is no process reading the FIFO.
+        */
+               ret = -ENXIO;
+-              if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
++              if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
+                       goto err;
+               pipe->w_counter++;
+-              if (!pipe->writers++)
++              if (atomic_inc_return(&pipe->writers) == 1)
+                       wake_up_partner(pipe);
+-              if (!is_pipe && !pipe->readers) {
++              if (!is_pipe && !atomic_read(&pipe->readers)) {
+                       if (wait_for_partner(pipe, &pipe->r_counter))
+                               goto err_wr;
+               }
+@@ -963,11 +964,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
+        *  the process can at least talk to itself.
+        */
+-              pipe->readers++;
+-              pipe->writers++;
++              atomic_inc(&pipe->readers);
++              atomic_inc(&pipe->writers);
+               pipe->r_counter++;
+               pipe->w_counter++;
+-              if (pipe->readers == 1 || pipe->writers == 1)
++              if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
+                       wake_up_partner(pipe);
+               break;
+@@ -981,13 +982,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
+       return 0;
+ err_rd:
+-      if (!--pipe->readers)
++      if (atomic_dec_and_test(&pipe->readers))
+               wake_up_interruptible(&pipe->wait);
+       ret = -ERESTARTSYS;
+       goto err;
+ err_wr:
+-      if (!--pipe->writers)
++      if (atomic_dec_and_test(&pipe->writers))
+               wake_up_interruptible(&pipe->wait);
+       ret = -ERESTARTSYS;
+       goto err;
+@@ -1065,7 +1066,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+  * Currently we rely on the pipe array holding a power-of-2 number
+  * of pages.
+  */
+-static inline unsigned int round_pipe_size(unsigned int size)
++static inline unsigned long round_pipe_size(unsigned long size)
+ {
+       unsigned long nr_pages;
+@@ -1113,13 +1114,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+       switch (cmd) {
+       case F_SETPIPE_SZ: {
+-              unsigned int size, nr_pages;
++              unsigned long size, nr_pages;
++
++              ret = -EINVAL;
++              if (arg < pipe_min_size)
++                      goto out;
+               size = round_pipe_size(arg);
+               nr_pages = size >> PAGE_SHIFT;
+-              ret = -EINVAL;
+-              if (!nr_pages)
++              if (size < pipe_min_size)
+                       goto out;
+               if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index bfc3ec3..f37d85d 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -20,6 +20,7 @@
+ #include <linux/xattr.h>
+ #include <linux/export.h>
+ #include <linux/user_namespace.h>
++#include <linux/grsecurity.h>
+ static struct posix_acl **acl_by_type(struct inode *inode, int type)
+ {
+@@ -311,7 +312,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
+               }
+       }
+         if (mode_p)
+-                *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
++                *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
+         return not_equiv;
+ }
+ EXPORT_SYMBOL(posix_acl_equiv_mode);
+@@ -461,7 +462,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
+               mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
+       }
+-      *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
++      *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
+         return not_equiv;
+ }
+@@ -519,6 +520,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
+       struct posix_acl *clone = posix_acl_clone(*acl, gfp);
+       int err = -ENOMEM;
+       if (clone) {
++              *mode_p &= ~gr_acl_umask();
++
+               err = posix_acl_create_masq(clone, mode_p);
+               if (err < 0) {
+                       posix_acl_release(clone);
+@@ -722,11 +725,12 @@ struct posix_acl *
+ posix_acl_from_xattr(struct user_namespace *user_ns,
+                    const void *value, size_t size)
+ {
+-      posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+-      posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
++      const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
++      const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
+       int count;
+       struct posix_acl *acl;
+       struct posix_acl_entry *acl_e;
++      umode_t umask = gr_acl_umask();
+       if (!value)
+               return NULL;
+@@ -752,12 +756,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
+               switch(acl_e->e_tag) {
+                       case ACL_USER_OBJ:
++                              acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
++                              break;
+                       case ACL_GROUP_OBJ:
+                       case ACL_MASK:
++                              acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
++                              break;
+                       case ACL_OTHER:
++                              acl_e->e_perm &= ~(umask & S_IRWXO);
+                               break;
+                       case ACL_USER:
++                              acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
+                               acl_e->e_uid =
+                                       make_kuid(user_ns,
+                                                 le32_to_cpu(entry->e_id));
+@@ -765,6 +775,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
+                                       goto fail;
+                               break;
+                       case ACL_GROUP:
++                              acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
+                               acl_e->e_gid =
+                                       make_kgid(user_ns,
+                                                 le32_to_cpu(entry->e_id));
+diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
+index 1ade120..a86f1a2 100644
+--- a/fs/proc/Kconfig
++++ b/fs/proc/Kconfig
+@@ -30,7 +30,7 @@ config PROC_FS
+ config PROC_KCORE
+       bool "/proc/kcore support" if !ARM
+-      depends on PROC_FS && MMU
++      depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
+       help
+         Provides a virtual ELF core file of the live kernel.  This can
+         be read with gdb and other ELF tools.  No modifications can be
+@@ -38,8 +38,8 @@ config PROC_KCORE
+ config PROC_VMCORE
+       bool "/proc/vmcore support"
+-      depends on PROC_FS && CRASH_DUMP
+-      default y
++      depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
++      default n
+         help
+         Exports the dump image of crashed kernel in ELF format.
+@@ -63,8 +63,8 @@ config PROC_SYSCTL
+         limited in memory.
+ config PROC_PAGE_MONITOR
+-      default y
+-      depends on PROC_FS && MMU
++      default n
++      depends on PROC_FS && MMU && !GRKERNSEC
+       bool "Enable /proc page monitoring" if EXPERT
+       help
+         Various /proc files exist to monitor process memory utilization:
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 88c7de1..3e4b510 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -60,6 +60,7 @@
+ #include <linux/tty.h>
+ #include <linux/string.h>
+ #include <linux/mman.h>
++#include <linux/grsecurity.h>
+ #include <linux/proc_fs.h>
+ #include <linux/ioport.h>
+ #include <linux/uaccess.h>
+@@ -369,6 +370,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+                  cpumask_pr_args(&task->cpus_allowed));
+ }
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline void task_pax(struct seq_file *m, struct task_struct *p)
++{
++      if (p->mm)
++              seq_printf(m, "PaX:\t%c%c%c%c%c\n",
++                         p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
++                         p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
++                         p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
++                         p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
++                         p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
++      else
++              seq_printf(m, "PaX:\t-----\n");
++}
++#endif
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task)
+ {
+@@ -387,9 +403,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+       task_cpus_allowed(m, task);
+       cpuset_task_status_allowed(m, task);
+       task_context_switch_counts(m, task);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      task_pax(m, task);
++#endif
++
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++      task_grsec_rbac(m, task);
++#endif
++
+       return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++                           (_mm->pax_flags & MF_PAX_RANDMMAP || \
++                            _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task, int whole)
+ {
+@@ -411,6 +442,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+       char tcomm[sizeof(task->comm)];
+       unsigned long flags;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("stat");
++              return 0;
++      }
++#endif
++
+       state = *get_task_state(task);
+       vsize = eip = esp = 0;
+       permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
+@@ -481,6 +519,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+               gtime = task_gtime(task);
+       }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (PAX_RAND_FLAGS(mm)) {
++              eip = 0;
++              esp = 0;
++              wchan = 0;
++      }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      wchan = 0;
++      eip =0;
++      esp =0;
++#endif
++
+       /* scale priority and nice values from timeslices to -20..20 */
+       /* to make it look like a "normal" Unix priority/nice value  */
+       priority = task_prio(task);
+@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+       seq_put_decimal_ull(m, ' ', vsize);
+       seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
+       seq_put_decimal_ull(m, ' ', rsslim);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
++      seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
++      seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
++#else
+       seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
+       seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
+       seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
++#endif
+       seq_put_decimal_ull(m, ' ', esp);
+       seq_put_decimal_ull(m, ' ', eip);
+       /* The signal information here is obsolete.
+@@ -548,7 +605,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+       seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
+       seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
+-      if (mm && permitted) {
++      if (mm && permitted
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              && !PAX_RAND_FLAGS(mm)
++#endif
++         ) {
+               seq_put_decimal_ull(m, ' ', mm->start_data);
+               seq_put_decimal_ull(m, ' ', mm->end_data);
+               seq_put_decimal_ull(m, ' ', mm->start_brk);
+@@ -586,8 +647,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task)
+ {
+       unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
+-      struct mm_struct *mm = get_task_mm(task);
++      struct mm_struct *mm;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("statm");
++              return 0;
++      }
++#endif
++      mm = get_task_mm(task);
+       if (mm) {
+               size = task_statm(mm, &shared, &text, &data, &resident);
+               mmput(mm);
+@@ -610,6 +678,21 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+       return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
++{
++      unsigned long flags;
++      u32 curr_ip = 0;
++
++      if (lock_task_sighand(task, &flags)) {
++              curr_ip = task->signal->curr_ip;
++              unlock_task_sighand(task, &flags);
++      }
++      seq_printf(m, "%pI4\n", &curr_ip);
++      return 0;
++}
++#endif
++
+ #ifdef CONFIG_PROC_CHILDREN
+ static struct pid *
+ get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index ac0df4d..5be5b93 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -113,6 +113,14 @@ struct pid_entry {
+       union proc_op op;
+ };
++struct getdents_callback {
++      struct linux_dirent __user * current_dir;
++      struct linux_dirent __user * previous;
++      struct file * file;
++      int count;
++      int error;
++};
++
+ #define NOD(NAME, MODE, IOP, FOP, OP) {                       \
+       .name = (NAME),                                 \
+       .len  = sizeof(NAME) - 1,                       \
+@@ -224,6 +232,11 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+               goto out_mmput;
+       }
++      if (gr_acl_handle_procpidmem(tsk)) {
++              rv = 0;
++              goto out_mmput;
++      }
++
+       page = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!page) {
+               rv = -ENOMEM;
+@@ -400,12 +413,28 @@ static const struct file_operations proc_pid_cmdline_ops = {
+       .llseek = generic_file_llseek,
+ };
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++                           (_mm->pax_flags & MF_PAX_RANDMMAP || \
++                            _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
+                        struct pid *pid, struct task_struct *task)
+ {
+       struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+       if (mm && !IS_ERR(mm)) {
+               unsigned int nwords = 0;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              /* allow if we're currently ptracing this task */
++              if (PAX_RAND_FLAGS(mm) &&
++                  (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
++                      mmput(mm);
++                      return 0;
++              }
++#endif
++
+               do {
+                       nwords += 2;
+               } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+@@ -417,7 +446,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
+ }
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /*
+  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+  * Returns the resolved symbol.  If that fails, simply return the address.
+@@ -430,8 +459,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
+       wchan = get_wchan(task);
+-      if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
+-                      && !lookup_symbol_name(wchan, symname))
++      if (wchan && !lookup_symbol_name(wchan, symname)
++                      && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+               seq_printf(m, "%s", symname);
+       else
+               seq_putc(m, '0');
+@@ -457,7 +486,7 @@ static void unlock_trace(struct task_struct *task)
+       mutex_unlock(&task->signal->cred_guard_mutex);
+ }
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ #define MAX_STACK_TRACE_DEPTH 64
+@@ -652,7 +681,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
+       return 0;
+ }
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
+                           struct pid *pid, struct task_struct *task)
+ {
+@@ -685,7 +714,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
+ /************************************************************************/
+ /* permission checks */
+-static int proc_fd_access_allowed(struct inode *inode)
++static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
+ {
+       struct task_struct *task;
+       int allowed = 0;
+@@ -695,7 +724,10 @@ static int proc_fd_access_allowed(struct inode *inode)
+        */
+       task = get_proc_task(inode);
+       if (task) {
+-              allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
++              if (log)
++                      allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
++              else
++                      allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
+               put_task_struct(task);
+       }
+       return allowed;
+@@ -726,6 +758,30 @@ static bool has_pid_permissions(struct pid_namespace *pid,
+                                struct task_struct *task,
+                                int hide_pid_min)
+ {
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              return false;
++
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      rcu_read_lock();
++      {
++              const struct cred *tmpcred = current_cred();
++              const struct cred *cred = __task_cred(task);
++
++              if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      || in_group_p(grsec_proc_gid)
++#endif
++              ) {
++                      rcu_read_unlock();
++                      return true;
++              }
++      }
++      rcu_read_unlock();
++
++      if (!pid->hide_pid)
++              return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
++#endif
++
+       if (pid->hide_pid < hide_pid_min)
+               return true;
+       if (in_group_p(pid->pid_gid))
+@@ -747,7 +803,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
+       put_task_struct(task);
+       if (!has_perms) {
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++              {
++#else
+               if (pid->hide_pid == 2) {
++#endif
+                       /*
+                        * Let's make getdents(), stat(), and open()
+                        * consistent with each other.  If a process
+@@ -801,13 +861,24 @@ static const struct file_operations proc_single_file_operations = {
+ };
+-struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
++struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode, u64 *ptracer_exec_id)
+ {
+       struct task_struct *task = get_proc_task(inode);
+       struct mm_struct *mm = ERR_PTR(-ESRCH);
++      if (ptracer_exec_id)
++              *ptracer_exec_id = 0;
++
+       if (task) {
+               mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
++              if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
++                      mmput(mm);
++                      mm = ERR_PTR(-EPERM);
++              }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              if (ptracer_exec_id)
++                      current_is_ptracer(task, ptracer_exec_id);
++#endif
+               put_task_struct(task);
+               if (!IS_ERR_OR_NULL(mm)) {
+@@ -823,12 +894,17 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
+ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
+ {
+-      struct mm_struct *mm = proc_mem_open(inode, mode);
++      struct mm_struct *mm = proc_mem_open(inode, mode, NULL);
+       if (IS_ERR(mm))
+               return PTR_ERR(mm);
+       file->private_data = mm;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      file->f_version = current->exec_id;
++#endif
++
+       return 0;
+ }
+@@ -850,6 +926,26 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+       ssize_t copied;
+       char *page;
++#ifdef CONFIG_GRKERNSEC
++      struct task_struct *task = get_proc_task(file_inode(file));
++      bool is_by_ptracer = false;
++
++      if (task) {
++              is_by_ptracer = current_is_ptracer(task, NULL);
++              put_task_struct(task);
++      }
++
++      if (write && !is_by_ptracer)
++              return -EPERM;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (file->f_version != current->exec_id && !is_by_ptracer) {
++              gr_log_badprocpid("mem");
++              return 0;
++      }
++#endif
++#endif
++
+       if (!mm)
+               return 0;
+@@ -862,7 +958,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+               goto free;
+       while (count > 0) {
+-              int this_len = min_t(int, count, PAGE_SIZE);
++              ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
+               if (write && copy_from_user(page, buf, this_len)) {
+                       copied = -EFAULT;
+@@ -956,6 +1052,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+       if (!mm || !mm->env_end)
+               return 0;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (file->f_version != current->exec_id) {
++              gr_log_badprocpid("environ");
++              return 0;
++      }
++#endif
++
+       page = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!page)
+               return -ENOMEM;
+@@ -969,9 +1072,12 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+       env_end = mm->env_end;
+       up_read(&mm->mmap_sem);
++      if (!env_end)
++              goto free;
++
+       while (count > 0) {
+               size_t this_len, max_len;
+-              int retval;
++              ssize_t retval;
+               if (src >= (env_end - env_start))
+                       break;
+@@ -1583,7 +1689,7 @@ static const char *proc_pid_get_link(struct dentry *dentry,
+               return ERR_PTR(-ECHILD);
+       /* Are we allowed to snoop on the tasks file descriptors? */
+-      if (!proc_fd_access_allowed(inode))
++      if (!proc_fd_access_allowed(inode, 0))
+               goto out;
+       error = PROC_I(inode)->op.proc_get_link(dentry, &path);
+@@ -1627,8 +1733,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
+       struct path path;
+       /* Are we allowed to snoop on the tasks file descriptors? */
+-      if (!proc_fd_access_allowed(inode))
+-              goto out;
++      /* logging this is needed for learning on chromium to work properly,
++         but we don't want to flood the logs from 'ps' which does a readlink
++         on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
++         CAP_SYS_PTRACE as it's not necessary for its basic functionality
++       */
++      if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
++              if (!proc_fd_access_allowed(inode,0))
++                      goto out;
++      } else {
++              if (!proc_fd_access_allowed(inode,1))
++                      goto out;
++      }
+       error = PROC_I(inode)->op.proc_get_link(dentry, &path);
+       if (error)
+@@ -1678,7 +1794,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
+               rcu_read_lock();
+               cred = __task_cred(task);
+               inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++              inode->i_gid = grsec_proc_gid;
++#else
+               inode->i_gid = cred->egid;
++#endif
+               rcu_read_unlock();
+       }
+       security_task_to_inode(task, inode);
+@@ -1714,10 +1834,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+                       return -ENOENT;
+               }
+               if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+                   task_dumpable(task)) {
+                       cred = __task_cred(task);
+                       stat->uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      stat->gid = grsec_proc_gid;
++#else
+                       stat->gid = cred->egid;
++#endif
+               }
+       }
+       rcu_read_unlock();
+@@ -1755,11 +1884,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
+       if (task) {
+               if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++                  (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+                   task_dumpable(task)) {
+                       rcu_read_lock();
+                       cred = __task_cred(task);
+                       inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      inode->i_gid = grsec_proc_gid;
++#else
+                       inode->i_gid = cred->egid;
++#endif
+                       rcu_read_unlock();
+               } else {
+                       inode->i_uid = GLOBAL_ROOT_UID;
+@@ -2373,6 +2511,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
+       if (!task)
+               goto out_no_task;
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              goto out;
++
+       /*
+        * Yes, it does not scale. And it should not. Don't add
+        * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2403,6 +2544,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
+       if (!task)
+               return -ENOENT;
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              goto out;
++
+       if (!dir_emit_dots(file, ctx))
+               goto out;
+@@ -2815,7 +2959,9 @@ static const struct inode_operations proc_task_inode_operations;
+ static const struct pid_entry tgid_base_stuff[] = {
+       DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
+       DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
++#ifndef CONFIG_GRKERNSEC
+       DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
++#endif
+       DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
+       DIR("ns",         S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
+ #ifdef CONFIG_NET
+@@ -2833,7 +2979,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+       REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
+ #endif
+       REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+       ONE("syscall",    S_IRUSR, proc_pid_syscall),
+ #endif
+       REG("cmdline",    S_IRUGO, proc_pid_cmdline_ops),
+@@ -2858,10 +3004,10 @@ static const struct pid_entry tgid_base_stuff[] = {
+ #ifdef CONFIG_SECURITY
+       DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       ONE("wchan",      S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       ONE("stack",      S_IRUSR, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHED_INFO
+@@ -2895,6 +3041,9 @@ static const struct pid_entry tgid_base_stuff[] = {
+ #ifdef CONFIG_HARDWALL
+       ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++      ONE("ipaddr",     S_IRUSR, proc_pid_ipaddr),
++#endif
+ #ifdef CONFIG_USER_NS
+       REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
+       REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
+@@ -3028,7 +3177,14 @@ static int proc_pid_instantiate(struct inode *dir,
+       if (!inode)
+               goto out;
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      inode->i_gid = grsec_proc_gid;
++      inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
++#else
+       inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+       inode->i_op = &proc_tgid_base_inode_operations;
+       inode->i_fop = &proc_tgid_base_operations;
+       inode->i_flags|=S_IMMUTABLE;
+@@ -3066,7 +3222,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
+       if (!task)
+               goto out;
++      if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++              goto out_put_task;
++
+       result = proc_pid_instantiate(dir, dentry, task, NULL);
++out_put_task:
+       put_task_struct(task);
+ out:
+       return ERR_PTR(result);
+@@ -3220,7 +3380,7 @@ static const struct pid_entry tid_base_stuff[] = {
+       NOD("comm",      S_IFREG|S_IRUGO|S_IWUSR,
+                        &proc_tid_comm_inode_operations,
+                        &proc_pid_set_comm_operations, {}),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+       ONE("syscall",   S_IRUSR, proc_pid_syscall),
+ #endif
+       REG("cmdline",   S_IRUGO, proc_pid_cmdline_ops),
+@@ -3247,10 +3407,10 @@ static const struct pid_entry tid_base_stuff[] = {
+ #ifdef CONFIG_SECURITY
+       DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       ONE("wchan",     S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+       ONE("stack",      S_IRUSR, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHED_INFO
+diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
+index cbd82df..c0407d2 100644
+--- a/fs/proc/cmdline.c
++++ b/fs/proc/cmdline.c
+@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
+ static int __init proc_cmdline_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
++#else
+       proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
++#endif
+       return 0;
+ }
+ fs_initcall(proc_cmdline_init);
+diff --git a/fs/proc/devices.c b/fs/proc/devices.c
+index 50493ed..248166b 100644
+--- a/fs/proc/devices.c
++++ b/fs/proc/devices.c
+@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
+ static int __init proc_devices_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
++#else
+       proc_create("devices", 0, NULL, &proc_devinfo_operations);
++#endif
+       return 0;
+ }
+ fs_initcall(proc_devices_init);
+diff --git a/fs/proc/fd.c b/fs/proc/fd.c
+index 01df23c..9b6c8f9 100644
+--- a/fs/proc/fd.c
++++ b/fs/proc/fd.c
+@@ -27,7 +27,8 @@ static int seq_show(struct seq_file *m, void *v)
+       if (!task)
+               return -ENOENT;
+-      files = get_files_struct(task);
++      if (!gr_acl_handle_procpidmem(task))
++              files = get_files_struct(task);
+       put_task_struct(task);
+       if (files) {
+@@ -296,13 +297,15 @@ int proc_fd_permission(struct inode *inode, int mask)
+       int rv;
+       rv = generic_permission(inode, mask);
+-      if (rv == 0)
+-              return rv;
+       rcu_read_lock();
+       p = pid_task(proc_pid(inode), PIDTYPE_PID);
+-      if (p && same_thread_group(p, current))
+-              rv = 0;
++      if (p) {
++              if (same_thread_group(p, current))
++                      rv = 0;
++              if (gr_acl_handle_procpidmem(p))
++                      rv = -EACCES;
++      }
+       rcu_read_unlock();
+       return rv;
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index c633476..881fce8 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -22,6 +22,7 @@
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
+@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
+       return proc_lookup_de(PDE(dir), dir, dentry);
+ }
++struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
++              unsigned int flags)
++{
++      if (gr_proc_is_restricted())
++              return ERR_PTR(-EACCES);
++
++      return proc_lookup_de(PDE(dir), dir, dentry);
++}
++
+ /*
+  * This returns non-zero if at EOF, so that the /proc
+  * root directory can use this and check if it should
+@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
+       return proc_readdir_de(PDE(inode), file, ctx);
+ }
++int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
++{
++      struct inode *inode = file_inode(file);
++
++      if (gr_proc_is_restricted())
++              return -EACCES;
++
++      return proc_readdir_de(PDE(inode), file, ctx);
++}
++
+ /*
+  * These are the generic /proc directory operations. They
+  * use the in-memory "struct proc_dir_entry" tree to parse
+@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
+       .iterate_shared         = proc_readdir,
+ };
++static const struct file_operations proc_dir_restricted_operations = {
++      .llseek                 = generic_file_llseek,
++      .read                   = generic_read_dir,
++      .iterate                = proc_readdir_restrict,
++};
++
+ /*
+  * proc directories can do almost nothing..
+  */
+@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
+       .setattr        = proc_notify_change,
+ };
++static const struct inode_operations proc_dir_restricted_inode_operations = {
++      .lookup         = proc_lookup_restrict,
++      .getattr        = proc_getattr,
++      .setattr        = proc_notify_change,
++};
++
+ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
+ {
+       int ret;
+@@ -445,6 +477,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
+ }
+ EXPORT_SYMBOL_GPL(proc_mkdir_data);
++struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
++              struct proc_dir_entry *parent, void *data)
++{
++      struct proc_dir_entry *ent;
++
++      if (mode == 0)
++              mode = S_IRUGO | S_IXUGO;
++
++      ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
++      if (ent) {
++              ent->data = data;
++              ent->restricted = 1;
++              ent->proc_fops = &proc_dir_restricted_operations;
++              ent->proc_iops = &proc_dir_restricted_inode_operations;
++              parent->nlink++;
++              if (proc_register(parent, ent) < 0) {
++                      kfree(ent);
++                      parent->nlink--;
++                      ent = NULL;
++              }
++      }
++      return ent;
++}
++EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
++
+ struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
+                                      struct proc_dir_entry *parent)
+ {
+@@ -459,6 +516,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
+ }
+ EXPORT_SYMBOL(proc_mkdir);
++struct proc_dir_entry *proc_mkdir_restrict(const char *name,
++              struct proc_dir_entry *parent)
++{
++      return proc_mkdir_data_restrict(name, 0, parent, NULL);
++}
++EXPORT_SYMBOL(proc_mkdir_restrict);
++
+ struct proc_dir_entry *proc_create_mount_point(const char *name)
+ {
+       umode_t mode = S_IFDIR | S_IRUGO | S_IXUGO;
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index c1b7238..290c707 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -23,11 +23,17 @@
+ #include <linux/slab.h>
+ #include <linux/mount.h>
+ #include <linux/magic.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
++#ifdef CONFIG_PROC_SYSCTL
++extern const struct inode_operations proc_sys_inode_operations;
++extern const struct inode_operations proc_sys_dir_operations;
++#endif
++
+ static void proc_evict_inode(struct inode *inode)
+ {
+       struct proc_dir_entry *de;
+@@ -48,6 +54,13 @@ static void proc_evict_inode(struct inode *inode)
+               RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
+               sysctl_head_put(head);
+       }
++
++#ifdef CONFIG_PROC_SYSCTL
++      if (inode->i_op == &proc_sys_inode_operations ||
++          inode->i_op == &proc_sys_dir_operations)
++              gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
++#endif
++
+ }
+ static struct kmem_cache * proc_inode_cachep;
+@@ -431,7 +444,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+               if (de->mode) {
+                       inode->i_mode = de->mode;
+                       inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                      inode->i_gid = grsec_proc_gid;
++#else
+                       inode->i_gid = de->gid;
++#endif
+               }
+               if (de->size)
+                       inode->i_size = de->size;
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 7931c55..7db5ad1 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -47,9 +47,10 @@ struct proc_dir_entry {
+       struct completion *pde_unload_completion;
+       struct list_head pde_openers;   /* who did ->open, but not ->release */
+       spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
++      u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
+       u8 namelen;
+       char name[];
+-};
++} __randomize_layout;
+ union proc_op {
+       int (*proc_get_link)(struct dentry *, struct path *);
+@@ -67,7 +68,7 @@ struct proc_inode {
+       struct ctl_table *sysctl_entry;
+       const struct proc_ns_operations *ns_ops;
+       struct inode vfs_inode;
+-};
++} __randomize_layout;
+ /*
+  * General functions
+@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
+                          struct pid *, struct task_struct *);
+ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
+                         struct pid *, struct task_struct *);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
++                        struct pid *, struct task_struct *);
++#endif
+ /*
+  * base.c
+@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
+  * generic.c
+  */
+ extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
++extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
+ extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
+                                    struct dentry *);
+ extern int proc_readdir(struct file *, struct dir_context *);
++extern int proc_readdir_restrict(struct file *, struct dir_context *);
+ extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
+ static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
+@@ -286,9 +293,12 @@ struct proc_maps_private {
+ #ifdef CONFIG_NUMA
+       struct mempolicy *task_mempolicy;
+ #endif
+-};
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      u64 ptracer_exec_id;
++#endif
++} __randomize_layout;
+-struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
++struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode, u64 *ptracer_exec_id);
+ extern const struct file_operations proc_pid_maps_operations;
+ extern const struct file_operations proc_tid_maps_operations;
+diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
+index a352d57..cb94a5c 100644
+--- a/fs/proc/interrupts.c
++++ b/fs/proc/interrupts.c
+@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
+ static int __init proc_interrupts_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
++#else
+       proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
++#endif
+       return 0;
+ }
+ fs_initcall(proc_interrupts_init);
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index 5c89a07..1749d06 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -316,7 +316,7 @@ static char *storenote(struct memelfnote *men, char *bufp)
+  * store an ELF coredump header in the supplied buffer
+  * nphdr is the number of elf_phdr to insert
+  */
+-static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
++static void elf_kcore_store_hdr(char *bufp, int nphdr, size_t dataoff)
+ {
+       struct elf_prstatus prstatus;   /* NT_PRSTATUS */
+       struct elf_prpsinfo prpsinfo;   /* NT_PRPSINFO */
+@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+        * the addresses in the elf_phdr on our list.
+        */
+       start = kc_offset_to_vaddr(*fpos - elf_buflen);
+-      if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
++      tsz = PAGE_SIZE - (start & ~PAGE_MASK);
++      if (tsz > buflen)
+               tsz = buflen;
+-              
++
+       while (buflen) {
+               struct kcore_list *m;
+@@ -508,24 +509,22 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+               } else {
+                       if (kern_addr_valid(start)) {
+                               unsigned long n;
++                              mm_segment_t oldfs;
+                               /*
+                                * Using bounce buffer to bypass the
+                                * hardened user copy kernel text checks.
+                                */
+-                              memcpy(buf, (char *) start, tsz);
+-                              n = copy_to_user(buffer, buf, tsz);
+-                              /*
+-                               * We cannot distinguish between fault on source
+-                               * and fault on destination. When this happens
+-                               * we clear too and hope it will trigger the
+-                               * EFAULT again.
+-                               */
+-                              if (n) { 
+-                                      if (clear_user(buffer + tsz - n,
+-                                                              n))
+-                                              return -EFAULT;
+-                              }
++                              oldfs = get_fs();
++                              set_fs(KERNEL_DS);
++                              n = __copy_from_user(buf, (const void __user *)start, tsz);
++                              set_fs(oldfs);
++                              if (n)
++                                      n = clear_user(buffer, tsz);
++                              else
++                                      n = copy_to_user(buffer, buf, tsz);
++                              if (n)
++                                      return -EFAULT;
+                       } else {
+                               if (clear_user(buffer, tsz))
+                                       return -EFAULT;
+@@ -545,10 +544,13 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+ static int open_kcore(struct inode *inode, struct file *filp)
+ {
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++      return -EPERM;
++#endif
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
+-      filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
++      filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY);
+       if (!filp->private_data)
+               return -ENOMEM;
+@@ -589,7 +591,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block kcore_callback_nb __meminitdata = {
++static struct notifier_block kcore_callback_nb = {
+       .notifier_call = kcore_callback,
+       .priority = 0,
+ };
+diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
+index b9a8c81..936ca066 100644
+--- a/fs/proc/meminfo.c
++++ b/fs/proc/meminfo.c
+@@ -161,7 +161,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
+               0ul, // used to be vmalloc 'used'
+               0ul  // used to be vmalloc 'largest_chunk'
+ #ifdef CONFIG_MEMORY_FAILURE
+-              , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
++              , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+ #endif
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               , K(global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR)
+diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
+index f8595e8..e0d13cbd 100644
+--- a/fs/proc/nommu.c
++++ b/fs/proc/nommu.c
+@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
+       if (file) {
+               seq_pad(m, ' ');
+-              seq_file_path(m, file, "");
++              seq_file_path(m, file, "\n\\");
+       }
+       seq_putc(m, '\n');
+diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
+index c8bbc68..d0f82d5 100644
+--- a/fs/proc/proc_net.c
++++ b/fs/proc/proc_net.c
+@@ -23,9 +23,27 @@
+ #include <linux/nsproxy.h>
+ #include <net/net_namespace.h>
+ #include <linux/seq_file.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++static struct seq_operations *ipv6_seq_ops_addr;
++
++void register_ipv6_seq_ops_addr(struct seq_operations *addr)
++{
++      ipv6_seq_ops_addr = addr;
++}
++
++void unregister_ipv6_seq_ops_addr(void)
++{
++      ipv6_seq_ops_addr = NULL;
++}
++
++EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
++EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
++#endif
++
+ static inline struct net *PDE_NET(struct proc_dir_entry *pde)
+ {
+       return pde->parent->data;
+@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
+       return maybe_get_net(PDE_NET(PDE(inode)));
+ }
++extern const struct seq_operations dev_seq_ops;
++
+ int seq_open_net(struct inode *ino, struct file *f,
+                const struct seq_operations *ops, int size)
+ {
+@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
+       BUG_ON(size < sizeof(*p));
++      /* only permit access to /proc/net/dev */
++      if (
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++          ops != ipv6_seq_ops_addr && 
++#endif
++          ops != &dev_seq_ops && gr_proc_is_restricted())
++              return -EACCES;
++
+       net = get_proc_net(ino);
+       if (net == NULL)
+               return -ENXIO;
+@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
+       int err;
+       struct net *net;
++      if (gr_proc_is_restricted())
++              return -EACCES;
++
+       err = -ENXIO;
+       net = get_proc_net(inode);
+       if (net == NULL)
+@@ -220,7 +251,7 @@ static __net_exit void proc_net_ns_exit(struct net *net)
+       kfree(net->proc_net);
+ }
+-static struct pernet_operations __net_initdata proc_net_ns_ops = {
++static struct pernet_operations __net_initconst proc_net_ns_ops = {
+       .init = proc_net_ns_init,
+       .exit = proc_net_ns_exit,
+ };
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 1b93650..49c54f2 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -11,13 +11,21 @@
+ #include <linux/namei.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
++#include <linux/nsproxy.h>
++#ifdef CONFIG_GRKERNSEC
++#include <net/net_namespace.h>
++#endif
+ #include "internal.h"
++extern int gr_handle_chroot_sysctl(const int op);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++                              const int op);
++
+ static const struct dentry_operations proc_sys_dentry_operations;
+ static const struct file_operations proc_sys_file_operations;
+-static const struct inode_operations proc_sys_inode_operations;
++const struct inode_operations proc_sys_inode_operations;
+ static const struct file_operations proc_sys_dir_file_operations;
+-static const struct inode_operations proc_sys_dir_operations;
++const struct inode_operations proc_sys_dir_operations;
+ /* Support for permanently empty directories */
+@@ -32,13 +40,17 @@ static bool is_empty_dir(struct ctl_table_header *head)
+ static void set_empty_dir(struct ctl_dir *dir)
+ {
+-      dir->header.ctl_table[0].child = sysctl_mount_point;
++      pax_open_kernel();
++      const_cast(dir->header.ctl_table[0].child) = sysctl_mount_point;
++      pax_close_kernel();
+ }
+ static void clear_empty_dir(struct ctl_dir *dir)
+ {
+-      dir->header.ctl_table[0].child = NULL;
++      pax_open_kernel();
++      const_cast(dir->header.ctl_table[0].child) = NULL;
++      pax_close_kernel();
+ }
+ void proc_sys_poll_notify(struct ctl_table_poll *poll)
+@@ -504,6 +516,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+       err = NULL;
+       d_set_d_op(dentry, &proc_sys_dentry_operations);
++
++      gr_handle_proc_create(dentry, inode);
++
+       d_add(dentry, inode);
+ out:
+@@ -519,6 +534,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+       struct inode *inode = file_inode(filp);
+       struct ctl_table_header *head = grab_header(inode);
+       struct ctl_table *table = PROC_I(inode)->sysctl_entry;
++      int op = write ? MAY_WRITE : MAY_READ;
+       ssize_t error;
+       size_t res;
+@@ -530,7 +546,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+        * and won't be until we finish.
+        */
+       error = -EPERM;
+-      if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
++      if (sysctl_perm(head, table, op))
+               goto out;
+       /* if that can happen at all, it should be -EINVAL, not -EISDIR */
+@@ -538,6 +554,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+       if (!table->proc_handler)
+               goto out;
++#ifdef CONFIG_GRKERNSEC
++      error = -EPERM;
++      if (gr_handle_chroot_sysctl(op))
++              goto out;
++      dget(filp->f_path.dentry);
++      if (gr_handle_sysctl_mod((const char *)filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
++              dput(filp->f_path.dentry);
++              goto out;
++      }
++      dput(filp->f_path.dentry);
++      if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
++              goto out;
++      if (write) {
++              if (current->nsproxy->net_ns != table->extra2) {
++                      if (!capable(CAP_SYS_ADMIN))
++                              goto out;
++              } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
++                      goto out;
++      }
++#endif
++
+       /* careful: calling conventions are nasty here */
+       res = count;
+       error = table->proc_handler(table, write, buf, &res, ppos);
+@@ -639,6 +676,7 @@ static bool proc_sys_fill_cache(struct file *file,
+                               return false;
+                       }
+                       d_set_d_op(child, &proc_sys_dentry_operations);
++                      gr_handle_proc_create(child, inode);
+                       d_add(child, inode);
+               }
+       }
+@@ -679,6 +717,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
+       if ((*pos)++ < ctx->pos)
+               return true;
++      if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
++              return 0;
++
+       if (unlikely(S_ISLNK(table->mode)))
+               res = proc_sys_link_fill_cache(file, ctx, head, table);
+       else
+@@ -772,6 +813,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
+       if (IS_ERR(head))
+               return PTR_ERR(head);
++      if (table && !gr_acl_handle_hidden_file(dentry, mnt))
++              return -ENOENT;
++
+       generic_fillattr(inode, stat);
+       if (table)
+               stat->mode = (stat->mode & S_IFMT) | table->mode;
+@@ -794,13 +838,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
+       .llseek         = generic_file_llseek,
+ };
+-static const struct inode_operations proc_sys_inode_operations = {
++const struct inode_operations proc_sys_inode_operations = {
+       .permission     = proc_sys_permission,
+       .setattr        = proc_sys_setattr,
+       .getattr        = proc_sys_getattr,
+ };
+-static const struct inode_operations proc_sys_dir_operations = {
++const struct inode_operations proc_sys_dir_operations = {
+       .lookup         = proc_sys_lookup,
+       .permission     = proc_sys_permission,
+       .setattr        = proc_sys_setattr,
+@@ -877,7 +921,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
+ static struct ctl_dir *new_dir(struct ctl_table_set *set,
+                              const char *name, int namelen)
+ {
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       struct ctl_dir *new;
+       struct ctl_node *node;
+       char *new_name;
+@@ -889,7 +933,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
+               return NULL;
+       node = (struct ctl_node *)(new + 1);
+-      table = (struct ctl_table *)(node + 1);
++      table = (ctl_table_no_const *)(node + 1);
+       new_name = (char *)(table + 2);
+       memcpy(new_name, name, namelen);
+       new_name[namelen] = '\0';
+@@ -1058,7 +1102,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
+ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
+       struct ctl_table_root *link_root)
+ {
+-      struct ctl_table *link_table, *entry, *link;
++      ctl_table_no_const *link_table, *link;
++      struct ctl_table *entry;
+       struct ctl_table_header *links;
+       struct ctl_node *node;
+       char *link_name;
+@@ -1081,7 +1126,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
+               return NULL;
+       node = (struct ctl_node *)(links + 1);
+-      link_table = (struct ctl_table *)(node + nr_entries);
++      link_table = (ctl_table_no_const *)(node + nr_entries);
+       link_name = (char *)&link_table[nr_entries + 1];
+       for (link = link_table, entry = table; entry->procname; link++, entry++) {
+@@ -1329,8 +1374,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
+       struct ctl_table_header ***subheader, struct ctl_table_set *set,
+       struct ctl_table *table)
+ {
+-      struct ctl_table *ctl_table_arg = NULL;
+-      struct ctl_table *entry, *files;
++      ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
++      struct ctl_table *entry;
+       int nr_files = 0;
+       int nr_dirs = 0;
+       int err = -ENOMEM;
+@@ -1342,10 +1387,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
+                       nr_files++;
+       }
+-      files = table;
+       /* If there are mixed files and directories we need a new table */
+       if (nr_dirs && nr_files) {
+-              struct ctl_table *new;
++              ctl_table_no_const *new;
+               files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
+                               GFP_KERNEL);
+               if (!files)
+@@ -1363,7 +1407,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
+       /* Register everything except a directory full of subdirectories */
+       if (nr_files || !nr_dirs) {
+               struct ctl_table_header *header;
+-              header = __register_sysctl_table(set, path, files);
++              header = __register_sysctl_table(set, path, files ? files : table);
+               if (!header) {
+                       kfree(ctl_table_arg);
+                       goto out;
+diff --git a/fs/proc/root.c b/fs/proc/root.c
+index 8d3e484..5fc5ce2 100644
+--- a/fs/proc/root.c
++++ b/fs/proc/root.c
+@@ -143,7 +143,15 @@ void __init proc_root_init(void)
+       proc_create_mount_point("openprom");
+ #endif
+       proc_tty_init();
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+       proc_mkdir("bus", NULL);
++#endif
+       proc_sys_init();
+ }
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index 7907e45..027fceb 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -11,6 +11,7 @@
+ #include <linux/irqnr.h>
+ #include <linux/cputime.h>
+ #include <linux/tick.h>
++#include <linux/grsecurity.h>
+ #ifndef arch_irq_stat_cpu
+ #define arch_irq_stat_cpu(cpu) 0
+@@ -86,6 +87,18 @@ static int show_stat(struct seq_file *p, void *v)
+       u64 sum_softirq = 0;
+       unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
+       struct timespec64 boottime;
++      int unrestricted = 1;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++              && !in_group_p(grsec_proc_gid)
++#endif
++      )
++              unrestricted = 0;
++#endif
++#endif
+       user = nice = system = idle = iowait =
+               irq = softirq = steal = 0;
+@@ -97,23 +110,25 @@ static int show_stat(struct seq_file *p, void *v)
+               nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+               system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+               idle += get_idle_time(i);
+-              iowait += get_iowait_time(i);
+-              irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+-              softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+-              steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+-              guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+-              guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+-              sum += kstat_cpu_irqs_sum(i);
+-              sum += arch_irq_stat_cpu(i);
++              if (unrestricted) {
++                      iowait += get_iowait_time(i);
++                      irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
++                      softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
++                      steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
++                      guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
++                      guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
++                      sum += kstat_cpu_irqs_sum(i);
++                      sum += arch_irq_stat_cpu(i);
++                      for (j = 0; j < NR_SOFTIRQS; j++) {
++                              unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
+-              for (j = 0; j < NR_SOFTIRQS; j++) {
+-                      unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
+-
+-                      per_softirq_sums[j] += softirq_stat;
+-                      sum_softirq += softirq_stat;
++                              per_softirq_sums[j] += softirq_stat;
++                              sum_softirq += softirq_stat;
++                      }
+               }
+       }
+-      sum += arch_irq_stat();
++      if (unrestricted)
++              sum += arch_irq_stat();
+       seq_puts(p, "cpu ");
+       seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
+@@ -134,12 +149,14 @@ static int show_stat(struct seq_file *p, void *v)
+               nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+               system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+               idle = get_idle_time(i);
+-              iowait = get_iowait_time(i);
+-              irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+-              softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+-              steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+-              guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+-              guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
++              if (unrestricted) {
++                      iowait = get_iowait_time(i);
++                      irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
++                      softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
++                      steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
++                      guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
++                      guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
++              }
+               seq_printf(p, "cpu%d", i);
+               seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
+               seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
+@@ -157,7 +174,7 @@ static int show_stat(struct seq_file *p, void *v)
+       /* sum again ? it could be updated? */
+       for_each_irq_nr(j)
+-              seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
++              seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
+       seq_printf(p,
+               "\nctxt %llu\n"
+@@ -165,11 +182,11 @@ static int show_stat(struct seq_file *p, void *v)
+               "processes %lu\n"
+               "procs_running %lu\n"
+               "procs_blocked %lu\n",
+-              nr_context_switches(),
++              unrestricted ? nr_context_switches() : 0ULL,
+               (unsigned long long)boottime.tv_sec,
+-              total_forks,
+-              nr_running(),
+-              nr_iowait());
++              unrestricted ? total_forks : 0UL,
++              unrestricted ? nr_running() : 0UL,
++              unrestricted ? nr_iowait() : 0UL);
+       seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index f6fa99e..ea67f46 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -15,12 +15,19 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/page_idle.h>
+ #include <linux/shmem_fs.h>
++#include <linux/grsecurity.h>
+ #include <asm/elf.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include "internal.h"
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++                           (_mm->pax_flags & MF_PAX_RANDMMAP || \
++                            _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ {
+       unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
+@@ -65,8 +72,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+               "VmLib:\t%8lu kB\n"
+               "VmPTE:\t%8lu kB\n"
+               "VmPMD:\t%8lu kB\n"
+-              "VmSwap:\t%8lu kB\n",
+-              hiwater_vm << (PAGE_SHIFT-10),
++              "VmSwap:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++              "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++              ,hiwater_vm << (PAGE_SHIFT-10),
+               total_vm << (PAGE_SHIFT-10),
+               mm->locked_vm << (PAGE_SHIFT-10),
+               mm->pinned_vm << (PAGE_SHIFT-10),
+@@ -79,7 +91,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+               mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+               ptes >> 10,
+               pmds >> 10,
+-              swap << (PAGE_SHIFT-10));
++              swap << (PAGE_SHIFT-10)
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
++              , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
++#else
++              , mm->context.user_cs_base
++              , mm->context.user_cs_limit
++#endif
++#endif
++
++      );
+       hugetlb_report_usage(m, mm);
+ }
+@@ -230,7 +254,11 @@ static int proc_maps_open(struct inode *inode, struct file *file,
+               return -ENOMEM;
+       priv->inode = inode;
+-      priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      priv->mm = proc_mem_open(inode, PTRACE_MODE_READ, &priv->ptracer_exec_id);
++#else
++      priv->mm = proc_mem_open(inode, PTRACE_MODE_READ, NULL);
++#endif
+       if (IS_ERR(priv->mm)) {
+               int err = PTR_ERR(priv->mm);
+@@ -285,7 +313,7 @@ static int is_stack(struct proc_maps_private *priv,
+ }
+ static void
+-show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
++show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid, bool restrict)
+ {
+       struct mm_struct *mm = vma->vm_mm;
+       struct file *file = vma->vm_file;
+@@ -304,13 +332,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+               pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+       }
+-      /* We don't show the stack guard page in /proc/maps */
+-      start = vma->vm_start;
+-      if (stack_guard_page_start(vma, start))
+-              start += PAGE_SIZE;
+-      end = vma->vm_end;
+-      if (stack_guard_page_end(vma, end))
+-              end -= PAGE_SIZE;
++      start = restrict ? 0UL : vma->vm_start;
++      end = restrict ? 0UL : vma->vm_end;
+       seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
+       seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
+@@ -320,7 +343,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+                       flags & VM_WRITE ? 'w' : '-',
+                       flags & VM_EXEC ? 'x' : '-',
+                       flags & VM_MAYSHARE ? 's' : 'p',
+-                      pgoff,
++                      restrict ? 0UL : pgoff,
+                       MAJOR(dev), MINOR(dev), ino);
+       /*
+@@ -329,7 +352,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+        */
+       if (file) {
+               seq_pad(m, ' ');
+-              seq_file_path(m, file, "\n");
++              seq_file_path(m, file, "\n\\");
+               goto done;
+       }
+@@ -366,7 +389,20 @@ done:
+ static int show_map(struct seq_file *m, void *v, int is_pid)
+ {
+-      show_map_vma(m, v, is_pid);
++      bool restrict = false;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      struct vm_area_struct *vma = (struct vm_area_struct *)v;
++      struct proc_maps_private *priv = m->private;
++      restrict = current->exec_id != priv->ptracer_exec_id;
++      if (current->exec_id != m->exec_id && restrict) {
++              gr_log_badprocpid("maps");
++              return 0;
++      }
++      if (restrict)
++              restrict = PAX_RAND_FLAGS(vma->vm_mm);
++#endif
++      show_map_vma(m, v, is_pid, restrict);
+       m_cache_vma(m, v);
+       return 0;
+ }
+@@ -654,6 +690,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
+               [ilog2(VM_RAND_READ)]   = "rr",
+               [ilog2(VM_DONTCOPY)]    = "dc",
+               [ilog2(VM_DONTEXPAND)]  = "de",
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++              [ilog2(VM_PAGEEXEC)]    = "px",
++#endif
+               [ilog2(VM_ACCOUNT)]     = "ac",
+               [ilog2(VM_NORESERVE)]   = "nr",
+               [ilog2(VM_HUGETLB)]     = "ht",
+@@ -735,7 +774,14 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
+               .mm = vma->vm_mm,
+               .private = &mss,
+       };
++      bool restrict = false;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("smaps");
++              return 0;
++      }
++#endif
+       memset(&mss, 0, sizeof mss);
+ #ifdef CONFIG_SHMEM
+@@ -762,10 +808,15 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
+       }
+ #endif
+-      /* mmap_sem is held in m_start */
+-      walk_page_vma(vma, &smaps_walk);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (PAX_RAND_FLAGS(vma->vm_mm))
++              restrict = true;
++      else
++#endif
++              /* mmap_sem is held in m_start */
++              walk_page_vma(vma, &smaps_walk);
+-      show_map_vma(m, vma, is_pid);
++      show_map_vma(m, vma, is_pid, restrict);
+       seq_printf(m,
+                  "Size:           %8lu kB\n"
+@@ -786,7 +837,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
+                  "KernelPageSize: %8lu kB\n"
+                  "MMUPageSize:    %8lu kB\n"
+                  "Locked:         %8lu kB\n",
+-                 (vma->vm_end - vma->vm_start) >> 10,
++                 restrict ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
+                  mss.resident >> 10,
+                  (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
+                  mss.shared_clean  >> 10,
+@@ -1443,7 +1494,7 @@ static int pagemap_open(struct inode *inode, struct file *file)
+ {
+       struct mm_struct *mm;
+-      mm = proc_mem_open(inode, PTRACE_MODE_READ);
++      mm = proc_mem_open(inode, PTRACE_MODE_READ, NULL);
+       if (IS_ERR(mm))
+               return PTR_ERR(mm);
+       file->private_data = mm;
+@@ -1646,6 +1697,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
+       char buffer[64];
+       int nid;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (current->exec_id != m->exec_id) {
++              gr_log_badprocpid("numa_maps");
++              return 0;
++      }
++#endif
++
+       if (!mm)
+               return 0;
+@@ -1660,11 +1718,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
+               mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
+       }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
++#else
+       seq_printf(m, "%08lx %s", vma->vm_start, buffer);
++#endif
+       if (file) {
+               seq_puts(m, " file=");
+-              seq_file_path(m, file, "\n\t= ");
++              seq_file_path(m, file, "\n\t\\= ");
+       } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+               seq_puts(m, " heap");
+       } else if (is_stack(proc_priv, vma, is_pid)) {
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index faacb0c..b185575 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+       else
+               bytes += kobjsize(mm);
+       
+-      if (current->fs && current->fs->users > 1)
++      if (current->fs && atomic_read(&current->fs->users) > 1)
+               sbytes += kobjsize(current->fs);
+       else
+               bytes += kobjsize(current->fs);
+@@ -142,7 +142,7 @@ static int is_stack(struct proc_maps_private *priv,
+                       stack = vma_is_stack_for_task(vma, task);
+               rcu_read_unlock();
+       }
+-      return stack;
++      return stack || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP));
+ }
+ /*
+@@ -183,7 +183,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
+       if (file) {
+               seq_pad(m, ' ');
+-              seq_file_path(m, file, "");
++              seq_file_path(m, file, "\n\\");
+       } else if (mm && is_stack(priv, vma, is_pid)) {
+               seq_pad(m, ' ');
+               seq_printf(m, "[stack]");
+@@ -287,7 +287,7 @@ static int maps_open(struct inode *inode, struct file *file,
+               return -ENOMEM;
+       priv->inode = inode;
+-      priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
++      priv->mm = proc_mem_open(inode, PTRACE_MODE_READ, NULL);
+       if (IS_ERR(priv->mm)) {
+               int err = PTR_ERR(priv->mm);
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 8ab782d..ef5bcbd 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
+                       nr_bytes = count;
+               /* If pfn is not ram, return zeros for sparse dump files */
+-              if (pfn_is_ram(pfn) == 0)
+-                      memset(buf, 0, nr_bytes);
+-              else {
++              if (pfn_is_ram(pfn) == 0) {
++                      if (userbuf) {
++                              if (clear_user((char __force_user *)buf, nr_bytes))
++                                      return -EFAULT;
++                      } else
++                              memset(buf, 0, nr_bytes);
++              } else {
+                       tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+                                               offset, userbuf);
+                       if (tmp < 0)
+@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ static int copy_to(void *target, void *src, size_t size, int userbuf)
+ {
+       if (userbuf) {
+-              if (copy_to_user((char __user *) target, src, size))
++              if (copy_to_user((char __force_user *) target, src, size))
+                       return -EFAULT;
+       } else {
+               memcpy(target, src, size);
+@@ -235,7 +239,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
+                                           m->offset + m->size - *fpos,
+                                           buflen);
+                       start = m->paddr + *fpos - m->offset;
+-                      tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
++                      tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
+                       if (tmp < 0)
+                               return tmp;
+                       buflen -= tsz;
+@@ -255,7 +259,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
+ static ssize_t read_vmcore(struct file *file, char __user *buffer,
+                          size_t buflen, loff_t *fpos)
+ {
+-      return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
++      return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
+ }
+ /*
+diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
+index d488770..10f088f 100644
+--- a/fs/pstore/ftrace.c
++++ b/fs/pstore/ftrace.c
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/compiler.h>
++#include <linux/bug.h>
+ #include <linux/irqflags.h>
+ #include <linux/percpu.h>
+ #include <linux/smp.h>
+diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
+index f23b5c4..da5d2f3 100644
+--- a/fs/qnx6/qnx6.h
++++ b/fs/qnx6/qnx6.h
+@@ -74,7 +74,7 @@ enum {
+       BYTESEX_BE,
+ };
+-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
++static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
+ {
+       if (sbi->s_bytesex == BYTESEX_LE)
+               return le64_to_cpu((__force __le64)n);
+@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
+               return (__force __fs64)cpu_to_be64(n);
+ }
+-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
+ {
+       if (sbi->s_bytesex == BYTESEX_LE)
+               return le32_to_cpu((__force __le32)n);
+diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
+index 8b25267..0706a93 100644
+--- a/fs/quota/netlink.c
++++ b/fs/quota/netlink.c
+@@ -42,7 +42,7 @@ static struct genl_family quota_genl_family = {
+ void quota_send_warning(struct kqid qid, dev_t dev,
+                       const char warntype)
+ {
+-      static atomic_t seq;
++      static atomic_unchecked_t seq;
+       struct sk_buff *skb;
+       void *msg_head;
+       int ret;
+@@ -58,7 +58,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
+                 "VFS: Not enough memory to send quota warning.\n");
+               return;
+       }
+-      msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
++      msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
+                       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+       if (!msg_head) {
+               printk(KERN_ERR
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 66215a7..7d66f62 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -23,7 +23,8 @@
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+-typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
++typedef ssize_t (*io_fnr_t)(struct file *, char __user *, size_t, loff_t *);
++typedef ssize_t (*io_fnw_t)(struct file *, const char __user *, size_t, loff_t *);
+ typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
+ const struct file_operations generic_ro_fops = {
+@@ -526,7 +527,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
+       old_fs = get_fs();
+       set_fs(get_ds());
+-      p = (__force const char __user *)buf;
++      p = (const char __force_user *)buf;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       ret = __vfs_write(file, p, count, pos);
+@@ -700,7 +701,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
+ /* Do it by hand, with file-ops */
+ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
+-              loff_t *ppos, io_fn_t fn, int flags)
++              loff_t *ppos, io_fnr_t fnr, io_fnw_t fnw, int flags)
+ {
+       ssize_t ret = 0;
+@@ -711,7 +712,10 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
+               struct iovec iovec = iov_iter_iovec(iter);
+               ssize_t nr;
+-              nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos);
++              if (fnr)
++                      nr = fnr(filp, iovec.iov_base, iovec.iov_len, ppos);
++              else
++                      nr = fnw(filp, iovec.iov_base, iovec.iov_len, ppos);
+               if (nr < 0) {
+                       if (!ret)
+@@ -815,7 +819,8 @@ static ssize_t do_readv_writev(int type, struct file *file,
+       struct iovec *iov = iovstack;
+       struct iov_iter iter;
+       ssize_t ret;
+-      io_fn_t fn;
++      io_fnr_t fnr;
++      io_fnw_t fnw;
+       iter_fn_t iter_fn;
+       ret = import_iovec(type, uvector, nr_segs,
+@@ -831,10 +836,12 @@ static ssize_t do_readv_writev(int type, struct file *file,
+               goto out;
+       if (type == READ) {
+-              fn = file->f_op->read;
++              fnr = file->f_op->read;
++              fnw = NULL;
+               iter_fn = file->f_op->read_iter;
+       } else {
+-              fn = (io_fn_t)file->f_op->write;
++              fnr = NULL;
++              fnw = file->f_op->write;
+               iter_fn = file->f_op->write_iter;
+               file_start_write(file);
+       }
+@@ -842,7 +849,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
+       if (iter_fn)
+               ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
+       else
+-              ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
++              ret = do_loop_readv_writev(file, &iter, pos, fnr, fnw, flags);
+       if (type != READ)
+               file_end_write(file);
+@@ -1040,7 +1047,8 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
+       struct iovec *iov = iovstack;
+       struct iov_iter iter;
+       ssize_t ret;
+-      io_fn_t fn;
++      io_fnr_t fnr;
++      io_fnw_t fnw;
+       iter_fn_t iter_fn;
+       ret = compat_import_iovec(type, uvector, nr_segs,
+@@ -1056,10 +1064,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
+               goto out;
+       if (type == READ) {
+-              fn = file->f_op->read;
++              fnr = file->f_op->read;
++              fnw = NULL;
+               iter_fn = file->f_op->read_iter;
+       } else {
+-              fn = (io_fn_t)file->f_op->write;
++              fnr = NULL;
++              fnw = file->f_op->write;
+               iter_fn = file->f_op->write_iter;
+               file_start_write(file);
+       }
+@@ -1067,7 +1077,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
+       if (iter_fn)
+               ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
+       else
+-              ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
++              ret = do_loop_readv_writev(file, &iter, pos, fnr, fnw, flags);
+       if (type != READ)
+               file_end_write(file);
+diff --git a/fs/readdir.c b/fs/readdir.c
+index 9d0212c..da1afd1 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -18,6 +18,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/unistd.h>
++#include <linux/namei.h>
+ #include <asm/uaccess.h>
+@@ -84,6 +85,7 @@ struct old_linux_dirent {
+ struct readdir_callback {
+       struct dir_context ctx;
+       struct old_linux_dirent __user * dirent;
++      struct file * file;
+       int result;
+ };
+@@ -102,6 +104,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
+               buf->result = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       buf->result++;
+       dirent = buf->dirent;
+       if (!access_ok(VERIFY_WRITE, dirent,
+@@ -133,6 +139,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+       if (!f.file)
+               return -EBADF;
++      buf.file = f.file;
+       error = iterate_dir(f.file, &buf.ctx);
+       if (buf.result)
+               error = buf.result;
+@@ -158,6 +165,7 @@ struct getdents_callback {
+       struct dir_context ctx;
+       struct linux_dirent __user * current_dir;
+       struct linux_dirent __user * previous;
++      struct file * file;
+       int count;
+       int error;
+ };
+@@ -180,6 +188,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
+               buf->error = -EOVERFLOW;
+               return -EOVERFLOW;
+       }
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+               if (signal_pending(current))
+@@ -227,6 +239,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
+       if (!f.file)
+               return -EBADF;
++      buf.file = f.file;
+       error = iterate_dir(f.file, &buf.ctx);
+       if (error >= 0)
+               error = buf.error;
+@@ -245,6 +258,7 @@ struct getdents_callback64 {
+       struct dir_context ctx;
+       struct linux_dirent64 __user * current_dir;
+       struct linux_dirent64 __user * previous;
++      struct file *file;
+       int count;
+       int error;
+ };
+@@ -261,6 +275,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
+       buf->error = -EINVAL;   /* only used if we fail.. */
+       if (reclen > buf->count)
+               return -EINVAL;
++
++      if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++              return 0;
++
+       dirent = buf->previous;
+       if (dirent) {
+               if (signal_pending(current))
+@@ -310,13 +328,13 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+       if (!f.file)
+               return -EBADF;
++      buf.file = f.file;
+       error = iterate_dir(f.file, &buf.ctx);
+       if (error >= 0)
+               error = buf.error;
+       lastdirent = buf.previous;
+       if (lastdirent) {
+-              typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+-              if (__put_user(d_off, &lastdirent->d_off))
++              if (__put_user(buf.ctx.pos, &lastdirent->d_off))
+                       error = -EFAULT;
+               else
+                       error = count - buf.count;
+diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
+index 9c02d96..6562c10 100644
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
+               return;
+       }
+-      atomic_inc(&fs_generation(tb->tb_sb));
++      atomic_inc_unchecked(&fs_generation(tb->tb_sb));
+       do_balance_starts(tb);
+       /*
+diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
+index aca73dd..e3c558d 100644
+--- a/fs/reiserfs/item_ops.c
++++ b/fs/reiserfs/item_ops.c
+@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
+ }
+ static struct item_operations errcatch_ops = {
+-      errcatch_bytes_number,
+-      errcatch_decrement_key,
+-      errcatch_is_left_mergeable,
+-      errcatch_print_item,
+-      errcatch_check_item,
++      .bytes_number = errcatch_bytes_number,
++      .decrement_key = errcatch_decrement_key,
++      .is_left_mergeable = errcatch_is_left_mergeable,
++      .print_item = errcatch_print_item,
++      .check_item = errcatch_check_item,
+-      errcatch_create_vi,
+-      errcatch_check_left,
+-      errcatch_check_right,
+-      errcatch_part_size,
+-      errcatch_unit_num,
+-      errcatch_print_vi
++      .create_vi = errcatch_create_vi,
++      .check_left = errcatch_check_left,
++      .check_right = errcatch_check_right,
++      .part_size = errcatch_part_size,
++      .unit_num = errcatch_unit_num,
++      .print_vi = errcatch_print_vi
+ };
+ #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
+diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
+index fe99915..24fd9bd 100644
+--- a/fs/reiserfs/procfs.c
++++ b/fs/reiserfs/procfs.c
+@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
+                  "SMALL_TAILS " : "NO_TAILS ",
+                  replay_only(sb) ? "REPLAY_ONLY " : "",
+                  convert_reiserfs(sb) ? "CONV " : "",
+-                 atomic_read(&r->s_generation_counter),
++                 atomic_read_unchecked(&r->s_generation_counter),
+                  SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
+                  SF(s_do_balance), SF(s_unneeded_left_neighbor),
+                  SF(s_good_search_by_key_reada), SF(s_bmaps),
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index 2adcde1..7d27bc8 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
+       /* Comment? -Hans */
+       wait_queue_head_t s_wait;
+       /* increased by one every time the  tree gets re-balanced */
+-      atomic_t s_generation_counter;
++      atomic_unchecked_t s_generation_counter;
+       /* File system properties. Currently holds on-disk FS format */
+       unsigned long s_properties;
+@@ -2300,7 +2300,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
+ #define REISERFS_USER_MEM             1       /* user memory mode */
+ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
+-#define get_generation(s) atomic_read (&fs_generation(s))
++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
+ #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+ #define __fs_changed(gen,s) (gen != get_generation (s))
+ #define fs_changed(gen,s)             \
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 74d5ddd..0ce3ad9 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1887,6 +1887,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+       sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+       sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+       sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
++#ifdef CONFIG_REISERFS_FS_XATTR
++      /* turn on user xattrs by default */
++      sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
++#endif
+       /* no preallocation minimum, be smart in reiserfs_file_write instead */
+       sbi->s_alloc_options.preallocmin = 0;
+       /* Preallocate by 16 blocks (17-1) at once */
+diff --git a/fs/select.c b/fs/select.c
+index 8ed9da5..4ee3bb4 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -20,6 +20,7 @@
+ #include <linux/export.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
++#include <linux/security.h>
+ #include <linux/personality.h> /* for STICKY_TIMEOUTS */
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+@@ -723,7 +724,7 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
+ #ifdef __ARCH_WANT_SYS_OLD_SELECT
+ struct sel_arg_struct {
+-      unsigned long n;
++      long n;
+       fd_set __user *inp, *outp, *exp;
+       struct timeval __user *tvp;
+ };
+@@ -886,6 +887,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+       struct poll_list *walk = head;
+       unsigned long todo = nfds;
++      gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
+       if (nfds > rlimit(RLIMIT_NOFILE))
+               return -EINVAL;
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index 6dc4296..cfdaf8e 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -14,6 +14,8 @@
+ #include <linux/mm.h>
+ #include <linux/printk.h>
+ #include <linux/string_helpers.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -26,7 +28,7 @@ static void seq_set_overflow(struct seq_file *m)
+ static void *seq_buf_alloc(unsigned long size)
+ {
+       void *buf;
+-      gfp_t gfp = GFP_KERNEL;
++      gfp_t gfp = GFP_KERNEL | GFP_USERCOPY;
+       /*
+        * For high order allocations, use __GFP_NORETRY to avoid oom-killing -
+@@ -38,7 +40,7 @@ static void *seq_buf_alloc(unsigned long size)
+               gfp |= __GFP_NORETRY | __GFP_NOWARN;
+       buf = kmalloc(size, gfp);
+       if (!buf && size > PAGE_SIZE)
+-              buf = vmalloc(size);
++              buf = vmalloc_usercopy(size);
+       return buf;
+ }
+@@ -77,6 +79,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
+       // to the lifetime of the file.
+       p->file = file;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      p->exec_id = current->exec_id;
++#endif
++
+       /*
+        * Wrappers around seq_open(e.g. swaps_open) need to be
+        * aware of this. If they set f_version themselves, they
+@@ -98,6 +104,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
+ }
+ EXPORT_SYMBOL(seq_open);
++
++int seq_open_restrict(struct file *file, const struct seq_operations *op)
++{
++      if (gr_proc_is_restricted())
++              return -EACCES;
++
++      return seq_open(file, op);
++}
++EXPORT_SYMBOL(seq_open_restrict);
++
+ static int traverse(struct seq_file *m, loff_t offset)
+ {
+       loff_t pos = 0, index;
+@@ -169,7 +185,7 @@ Eoverflow:
+ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ {
+       struct seq_file *m = file->private_data;
+-      size_t copied = 0;
++      ssize_t copied = 0;
+       loff_t pos;
+       size_t n;
+       void *p;
+@@ -566,7 +582,7 @@ static void single_stop(struct seq_file *p, void *v)
+ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
+               void *data)
+ {
+-      struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
++      seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
+       int res = -ENOMEM;
+       if (op) {
+@@ -602,6 +618,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
+ }
+ EXPORT_SYMBOL(single_open_size);
++int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
++              void *data)
++{
++      if (gr_proc_is_restricted())
++              return -EACCES;
++
++      return single_open(file, show, data);
++}
++EXPORT_SYMBOL(single_open_restrict);
++
++
+ int single_release(struct inode *inode, struct file *file)
+ {
+       const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
+diff --git a/fs/splice.c b/fs/splice.c
+index dd9bf7e..3d55c3e 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+       pipe_lock(pipe);
+       for (;;) {
+-              if (!pipe->readers) {
++              if (!atomic_read(&pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+@@ -218,7 +218,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+                       page_nr++;
+                       ret += buf->len;
+-                      if (pipe->files)
++                      if (atomic_read(&pipe->files))
+                               do_wakeup = 1;
+                       if (!--spd->nr_pages)
+@@ -249,9 +249,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+                       do_wakeup = 0;
+               }
+-              pipe->waiting_writers++;
++              atomic_inc(&pipe->waiting_writers);
+               pipe_wait(pipe);
+-              pipe->waiting_writers--;
++              atomic_dec(&pipe->waiting_writers);
+       }
+       pipe_unlock(pipe);
+@@ -580,7 +580,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0);
++      res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos, 0);
+       set_fs(old_fs);
+       return res;
+@@ -595,7 +595,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      res = vfs_write(file, (__force const char __user *)buf, count, &pos);
++      res = vfs_write(file, (const char __force_user *)buf, count, &pos);
+       set_fs(old_fs);
+       return res;
+@@ -648,7 +648,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+                       goto err;
+               this_len = min_t(size_t, len, PAGE_SIZE - offset);
+-              vec[i].iov_base = (void __user *) page_address(page);
++              vec[i].iov_base = (void __force_user *) page_address(page);
+               vec[i].iov_len = this_len;
+               spd.pages[i] = page;
+               spd.nr_pages++;
+@@ -787,7 +787,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
+                       ops->release(pipe, buf);
+                       pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+                       pipe->nrbufs--;
+-                      if (pipe->files)
++                      if (atomic_read(&pipe->files))
+                               sd->need_wakeup = true;
+               }
+@@ -818,10 +818,10 @@ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_des
+               return -ERESTARTSYS;
+       while (!pipe->nrbufs) {
+-              if (!pipe->writers)
++              if (!atomic_read(&pipe->writers))
+                       return 0;
+-              if (!pipe->waiting_writers && sd->num_spliced)
++              if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
+                       return 0;
+               if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1037,7 +1037,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
+                               ops->release(pipe, buf);
+                               pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+                               pipe->nrbufs--;
+-                              if (pipe->files)
++                              if (atomic_read(&pipe->files))
+                                       sd.need_wakeup = true;
+                       } else {
+                               buf->offset += ret;
+@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+                * out of the pipe right after the splice_to_pipe(). So set
+                * PIPE_READERS appropriately.
+                */
+-              pipe->readers = 1;
++              atomic_set(&pipe->readers, 1);
+               current->splice_pipe = pipe;
+       }
+@@ -1507,6 +1507,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+                       partial[buffers].offset = off;
+                       partial[buffers].len = plen;
++                      partial[buffers].private = 0;
+                       off = 0;
+                       len -= plen;
+@@ -1738,9 +1739,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+-              if (!pipe->writers)
++              if (!atomic_read(&pipe->writers))
+                       break;
+-              if (!pipe->waiting_writers) {
++              if (!atomic_read(&pipe->waiting_writers)) {
+                       if (flags & SPLICE_F_NONBLOCK) {
+                               ret = -EAGAIN;
+                               break;
+@@ -1772,7 +1773,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+       pipe_lock(pipe);
+       while (pipe->nrbufs >= pipe->buffers) {
+-              if (!pipe->readers) {
++              if (!atomic_read(&pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       ret = -EPIPE;
+                       break;
+@@ -1785,9 +1786,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+-              pipe->waiting_writers++;
++              atomic_inc(&pipe->waiting_writers);
+               pipe_wait(pipe);
+-              pipe->waiting_writers--;
++              atomic_dec(&pipe->waiting_writers);
+       }
+       pipe_unlock(pipe);
+@@ -1823,14 +1824,14 @@ retry:
+       pipe_double_lock(ipipe, opipe);
+       do {
+-              if (!opipe->readers) {
++              if (!atomic_read(&opipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+                       break;
+               }
+-              if (!ipipe->nrbufs && !ipipe->writers)
++              if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
+                       break;
+               /*
+@@ -1927,7 +1928,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+       pipe_double_lock(ipipe, opipe);
+       do {
+-              if (!opipe->readers) {
++              if (!atomic_read(&opipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+@@ -1972,7 +1973,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+        * return EAGAIN if we have the potential of some data in the
+        * future, otherwise just return 0
+        */
+-      if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
++      if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
+               ret = -EAGAIN;
+       pipe_unlock(ipipe);
+diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
+index 1548b37..0624869 100644
+--- a/fs/squashfs/xattr.c
++++ b/fs/squashfs/xattr.c
+@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
+                                                + msblk->xattr_table;
+       int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
+       int count = squashfs_i(inode)->xattr_count;
+-      size_t rest = buffer_size;
+-      int err;
++      size_t used = 0;
++      ssize_t err;
+       /* check that the file system has xattrs */
+       if (msblk->xattr_id_table == NULL)
+@@ -72,7 +72,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
+                       size_t prefix_size = strlen(prefix);
+                       if (buffer) {
+-                              if (prefix_size + name_size + 1 > rest) {
++                              if (prefix_size + name_size + 1 > buffer_size - used) {
+                                       err = -ERANGE;
+                                       goto failed;
+                               }
+@@ -87,7 +87,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
+                               buffer[name_size] = '\0';
+                               buffer += name_size + 1;
+                       }
+-                      rest -= prefix_size + name_size + 1;
++                      used += prefix_size + name_size + 1;
+               } else  {
+                       /* no handler or insuffficient privileges, so skip */
+                       err = squashfs_read_metadata(sb, NULL, &start,
+@@ -108,7 +108,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
+               if (err < 0)
+                       goto failed;
+       }
+-      err = buffer_size - rest;
++      err = used;
+ failed:
+       return err;
+diff --git a/fs/stat.c b/fs/stat.c
+index bc045c7..68725c1 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+       stat->gid = inode->i_gid;
+       stat->rdev = inode->i_rdev;
+       stat->size = i_size_read(inode);
+-      stat->atime = inode->i_atime;
+-      stat->mtime = inode->i_mtime;
++      if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++              stat->atime = inode->i_ctime;
++              stat->mtime = inode->i_ctime;
++      } else {
++              stat->atime = inode->i_atime;
++              stat->mtime = inode->i_mtime;
++      }
+       stat->ctime = inode->i_ctime;
+       stat->blksize = (1 << inode->i_blkbits);
+       stat->blocks = inode->i_blocks;
+@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
+ int vfs_getattr_nosec(struct path *path, struct kstat *stat)
+ {
+       struct inode *inode = d_backing_inode(path->dentry);
++      int retval;
+-      if (inode->i_op->getattr)
+-              return inode->i_op->getattr(path->mnt, path->dentry, stat);
++      if (inode->i_op->getattr) {
++              retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
++              if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++                      stat->atime = stat->ctime;
++                      stat->mtime = stat->ctime;
++              }
++              return retval;
++      }
+       generic_fillattr(inode, stat);
+       return 0;
+diff --git a/fs/super.c b/fs/super.c
+index 47d11e0..31ae978 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -357,7 +357,8 @@ EXPORT_SYMBOL(deactivate_super);
+  *    called for superblocks not in rundown mode (== ones still on ->fs_supers
+  *    of their type), so increment of ->s_count is OK here.
+  */
+-static int grab_super(struct super_block *s) __releases(sb_lock)
++static int grab_super(struct super_block *s) __releases(&sb_lock);
++static int grab_super(struct super_block *s)
+ {
+       s->s_count++;
+       spin_unlock(&sb_lock);
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 94374e4..b5da3a1 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
+       kfree(buf);
+ }
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++extern int grsec_enable_sysfs_restrict;
++#endif
++
+ /**
+  * sysfs_create_dir_ns - create a directory for an object with a namespace tag
+  * @kobj: object we're creating directory for
+@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
+ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+ {
+       struct kernfs_node *parent, *kn;
++      const char *name;
++      umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++      const char *parent_name;
++#endif
+       BUG_ON(!kobj);
++      name = kobject_name(kobj);
++
+       if (kobj->parent)
+               parent = kobj->parent->sd;
+       else
+@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+       if (!parent)
+               return -ENOENT;
+-      kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
+-                                S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++      parent_name = parent->name;
++      mode = S_IRWXU;
++
++      if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
++          (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
++          (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
++          (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
++              mode = S_IRWXU | S_IRUGO | S_IXUGO;
++      if (!grsec_enable_sysfs_restrict)
++              mode = S_IRWXU | S_IRUGO | S_IXUGO;
++#endif
++
++      kn = kernfs_create_dir_ns(parent, name,
++                                mode, kobj, ns);
+       if (IS_ERR(kn)) {
+               if (PTR_ERR(kn) == -EEXIST)
+-                      sysfs_warn_dup(parent, kobject_name(kobj));
++                      sysfs_warn_dup(parent, name);
+               return PTR_ERR(kn);
+       }
+diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
+index 6c21228..9afd5fe 100644
+--- a/fs/sysv/sysv.h
++++ b/fs/sysv/sysv.h
+@@ -187,7 +187,7 @@ static inline u32 PDP_swab(u32 x)
+ #endif
+ }
+-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+ {
+       if (sbi->s_bytesex == BYTESEX_PDP)
+               return PDP_swab((__force __u32)n);
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index ad40b64..9892e72 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -53,7 +53,7 @@ static const struct file_operations tracefs_file_operations = {
+ static struct tracefs_dir_ops {
+       int (*mkdir)(const char *name);
+       int (*rmdir)(const char *name);
+-} tracefs_ops;
++} __no_const tracefs_ops __read_only;
+ static char *get_dname(struct dentry *dentry)
+ {
+@@ -494,8 +494,10 @@ struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *pare
+       if (!dentry)
+               return NULL;
+-      tracefs_ops.mkdir = mkdir;
+-      tracefs_ops.rmdir = rmdir;
++      pax_open_kernel();
++      const_cast(tracefs_ops.mkdir) = mkdir;
++      const_cast(tracefs_ops.rmdir) = rmdir;
++      pax_close_kernel();
+       return dentry;
+ }
+diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
+index 2dcf3d4..fa1e496 100644
+--- a/fs/ubifs/find.c
++++ b/fs/ubifs/find.c
+@@ -94,8 +94,9 @@ static int valuable(struct ubifs_info *c, const struct ubifs_lprops *lprops)
+  */
+ static int scan_for_dirty_cb(struct ubifs_info *c,
+                            const struct ubifs_lprops *lprops, int in_tree,
+-                           struct scan_data *data)
++                           void *_data)
+ {
++      struct scan_data *data = _data;
+       int ret = LPT_SCAN_CONTINUE;
+       /* Exclude LEBs that are currently in use */
+@@ -179,7 +180,7 @@ static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c,
+       data.lnum = -1;
+       data.exclude_index = exclude_index;
+       err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
+-                                  (ubifs_lpt_scan_callback)scan_for_dirty_cb,
++                                  scan_for_dirty_cb,
+                                   &data);
+       if (err)
+               return ERR_PTR(err);
+@@ -361,8 +362,9 @@ out:
+  */
+ static int scan_for_free_cb(struct ubifs_info *c,
+                           const struct ubifs_lprops *lprops, int in_tree,
+-                          struct scan_data *data)
++                          void *_data)
+ {
++      struct scan_data *data = _data;
+       int ret = LPT_SCAN_CONTINUE;
+       /* Exclude LEBs that are currently in use */
+@@ -458,7 +460,7 @@ const struct ubifs_lprops *do_find_free_space(struct ubifs_info *c,
+       data.pick_free = pick_free;
+       data.lnum = -1;
+       err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
+-                                  (ubifs_lpt_scan_callback)scan_for_free_cb,
++                                  scan_for_free_cb,
+                                   &data);
+       if (err)
+               return ERR_PTR(err);
+@@ -601,8 +603,9 @@ out:
+  */
+ static int scan_for_idx_cb(struct ubifs_info *c,
+                          const struct ubifs_lprops *lprops, int in_tree,
+-                         struct scan_data *data)
++                         void *_data)
+ {
++      struct scan_data *data = _data;
+       int ret = LPT_SCAN_CONTINUE;
+       /* Exclude LEBs that are currently in use */
+@@ -638,7 +641,7 @@ static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c)
+       data.lnum = -1;
+       err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
+-                                  (ubifs_lpt_scan_callback)scan_for_idx_cb,
++                                  scan_for_idx_cb,
+                                   &data);
+       if (err)
+               return ERR_PTR(err);
+@@ -738,18 +741,21 @@ out:
+       return err;
+ }
+-static int cmp_dirty_idx(const struct ubifs_lprops **a,
+-                       const struct ubifs_lprops **b)
++static int cmp_dirty_idx(const void *_a,
++                       const void *_b)
+ {
++      const struct ubifs_lprops **a = (const struct ubifs_lprops **)_a;
++      const struct ubifs_lprops **b = (const struct ubifs_lprops **)_b;
+       const struct ubifs_lprops *lpa = *a;
+       const struct ubifs_lprops *lpb = *b;
+       return lpa->dirty + lpa->free - lpb->dirty - lpb->free;
+ }
+-static void swap_dirty_idx(struct ubifs_lprops **a, struct ubifs_lprops **b,
+-                         int size)
++static void swap_dirty_idx(void *_a, void *_b, int size)
+ {
++      struct ubifs_lprops **a = (struct ubifs_lprops **)_a;
++      struct ubifs_lprops **b = (struct ubifs_lprops **)_b;
+       struct ubifs_lprops *t = *a;
+       *a = *b;
+@@ -775,8 +781,7 @@ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c)
+              sizeof(void *) * c->dirty_idx.cnt);
+       /* Sort it so that the dirtiest is now at the end */
+       sort(c->dirty_idx.arr, c->dirty_idx.cnt, sizeof(void *),
+-           (int (*)(const void *, const void *))cmp_dirty_idx,
+-           (void (*)(void *, void *, int))swap_dirty_idx);
++           cmp_dirty_idx, swap_dirty_idx);
+       dbg_find("found %d dirty index LEBs", c->dirty_idx.cnt);
+       if (c->dirty_idx.cnt)
+               dbg_find("dirtiest index LEB is %d with dirty %d and free %d",
+@@ -804,8 +809,9 @@ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c)
+  */
+ static int scan_dirty_idx_cb(struct ubifs_info *c,
+                          const struct ubifs_lprops *lprops, int in_tree,
+-                         struct scan_data *data)
++                         void *_data)
+ {
++      struct scan_data *data = _data;
+       int ret = LPT_SCAN_CONTINUE;
+       /* Exclude LEBs that are currently in use */
+@@ -865,7 +871,7 @@ static int find_dirty_idx_leb(struct ubifs_info *c)
+               /* All pnodes are in memory, so skip scan */
+               return -ENOSPC;
+       err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
+-                                  (ubifs_lpt_scan_callback)scan_dirty_idx_cb,
++                                  scan_dirty_idx_cb,
+                                   &data);
+       if (err)
+               return err;
+diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
+index a0011aa..c8cf709 100644
+--- a/fs/ubifs/lprops.c
++++ b/fs/ubifs/lprops.c
+@@ -1028,8 +1028,9 @@ out:
+  */
+ static int scan_check_cb(struct ubifs_info *c,
+                        const struct ubifs_lprops *lp, int in_tree,
+-                       struct ubifs_lp_stats *lst)
++                       void *_lst)
+ {
++      struct ubifs_lp_stats *lst = _lst;
+       struct ubifs_scan_leb *sleb;
+       struct ubifs_scan_node *snod;
+       int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty, ret;
+@@ -1283,7 +1284,7 @@ int dbg_check_lprops(struct ubifs_info *c)
+       memset(&lst, 0, sizeof(struct ubifs_lp_stats));
+       err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1,
+-                                  (ubifs_lpt_scan_callback)scan_check_cb,
++                                  scan_check_cb,
+                                   &lst);
+       if (err && err != -ENOSPC)
+               goto out;
+diff --git a/fs/udf/misc.c b/fs/udf/misc.c
+index 71d1c25..084e2ad 100644
+--- a/fs/udf/misc.c
++++ b/fs/udf/misc.c
+@@ -288,7 +288,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
+ u8 udf_tag_checksum(const struct tag *t)
+ {
+-      u8 *data = (u8 *)t;
++      const u8 *data = (const u8 *)t;
+       u8 checksum = 0;
+       int i;
+       for (i = 0; i < sizeof(struct tag); ++i)
+diff --git a/fs/ufs/super.c b/fs/ufs/super.c
+index f04ab23..b26fff4 100644
+--- a/fs/ufs/super.c
++++ b/fs/ufs/super.c
+@@ -1424,10 +1424,12 @@ static void init_once(void *foo)
+ static int __init init_inodecache(void)
+ {
+-      ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
++      ufs_inode_cachep = kmem_cache_create_usercopy("ufs_inode_cache",
+                                            sizeof(struct ufs_inode_info),
+                                            0, (SLAB_RECLAIM_ACCOUNT|
+                                               SLAB_MEM_SPREAD|SLAB_ACCOUNT),
++                                           offsetof(struct ufs_inode_info, i_u1.i_symlink),
++                                           sizeof(((struct ufs_inode_info *)0)->i_u1.i_symlink),
+                                            init_once);
+       if (ufs_inode_cachep == NULL)
+               return -ENOMEM;
+diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
+index 8d974c4..b82f6ec 100644
+--- a/fs/ufs/swab.h
++++ b/fs/ufs/swab.h
+@@ -22,7 +22,7 @@ enum {
+       BYTESEX_BE
+ };
+-static inline u64
++static inline u64 __intentional_overflow(-1)
+ fs64_to_cpu(struct super_block *sbp, __fs64 n)
+ {
+       if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
+               return (__force __fs64)cpu_to_be64(n);
+ }
+-static inline u32
++static inline u32 __intentional_overflow(-1)
+ fs32_to_cpu(struct super_block *sbp, __fs32 n)
+ {
+       if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 85959d8..6e511a7 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -432,7 +432,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+       struct userfaultfd_wake_range range = { .len = 0, };
+       unsigned long new_flags;
+-      ACCESS_ONCE(ctx->released) = true;
++      ACCESS_ONCE_RW(ctx->released) = true;
+       if (!mmget_not_zero(mm))
+               goto wakeup;
+diff --git a/fs/utimes.c b/fs/utimes.c
+index ba54b9e..49fc4d8 100644
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -1,6 +1,7 @@
+ #include <linux/compiler.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/security.h>
+ #include <linux/linkage.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+@@ -90,6 +91,12 @@ static int utimes_common(struct path *path, struct timespec *times)
+               newattrs.ia_valid |= ATTR_TOUCH;
+       }
+ retry_deleg:
++
++      if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
++              error = -EACCES;
++              goto mnt_drop_write_and_out;
++      }
++
+       inode_lock(inode);
+       error = notify_change(path->dentry, &newattrs, &delegated_inode);
+       inode_unlock(inode);
+@@ -99,6 +106,7 @@ retry_deleg:
+                       goto retry_deleg;
+       }
++mnt_drop_write_and_out:
+       mnt_drop_write(path->mnt);
+ out:
+       return error;
+diff --git a/fs/xattr.c b/fs/xattr.c
+index c243905..6f99cc7 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -215,6 +215,27 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
+       return error;
+ }
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ssize_t
++pax_getxattr(struct dentry *dentry, void *value, size_t size)
++{
++      struct inode *inode = dentry->d_inode;
++      ssize_t error;
++
++      error = inode_permission(inode, MAY_EXEC);
++      if (error)
++              return error;
++
++      if (inode->i_op->getxattr)
++              error = inode->i_op->getxattr(dentry, inode, XATTR_NAME_USER_PAX_FLAGS, value, size);
++      else
++              error = -EOPNOTSUPP;
++
++      return error;
++}
++EXPORT_SYMBOL(pax_getxattr);
++#endif
++
+ ssize_t
+ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+ {
+@@ -307,7 +328,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
+  * Extended attribute SET operations
+  */
+ static long
+-setxattr(struct dentry *d, const char __user *name, const void __user *value,
++setxattr(struct path *path, const char __user *name, const void __user *value,
+        size_t size, int flags)
+ {
+       int error;
+@@ -341,7 +362,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
+                       posix_acl_fix_xattr_from_user(kvalue, size);
+       }
+-      error = vfs_setxattr(d, kname, kvalue, size, flags);
++      if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
++              error = -EACCES;
++              goto out;
++      }
++
++      error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
+ out:
+       kvfree(kvalue);
+@@ -360,7 +386,7 @@ retry:
+               return error;
+       error = mnt_want_write(path.mnt);
+       if (!error) {
+-              error = setxattr(path.dentry, name, value, size, flags);
++              error = setxattr(&path, name, value, size, flags);
+               mnt_drop_write(path.mnt);
+       }
+       path_put(&path);
+@@ -396,7 +422,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
+       audit_file(f.file);
+       error = mnt_want_write_file(f.file);
+       if (!error) {
+-              error = setxattr(f.file->f_path.dentry, name, value, size, flags);
++              error = setxattr(&f.file->f_path, name, value, size, flags);
+               mnt_drop_write_file(f.file);
+       }
+       fdput(f);
+@@ -576,7 +602,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
+  * Extended attribute REMOVE operations
+  */
+ static long
+-removexattr(struct dentry *d, const char __user *name)
++removexattr(struct path *path, const char __user *name)
+ {
+       int error;
+       char kname[XATTR_NAME_MAX + 1];
+@@ -587,7 +613,10 @@ removexattr(struct dentry *d, const char __user *name)
+       if (error < 0)
+               return error;
+-      return vfs_removexattr(d, kname);
++      if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
++              return -EACCES;
++
++      return vfs_removexattr(path->dentry, kname);
+ }
+ static int path_removexattr(const char __user *pathname,
+@@ -601,7 +630,7 @@ retry:
+               return error;
+       error = mnt_want_write(path.mnt);
+       if (!error) {
+-              error = removexattr(path.dentry, name);
++              error = removexattr(&path, name);
+               mnt_drop_write(path.mnt);
+       }
+       path_put(&path);
+@@ -627,14 +656,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
+ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
+ {
+       struct fd f = fdget(fd);
++      struct path *path;
+       int error = -EBADF;
+       if (!f.file)
+               return error;
++      path = &f.file->f_path;
+       audit_file(f.file);
+       error = mnt_want_write_file(f.file);
+       if (!error) {
+-              error = removexattr(f.file->f_path.dentry, name);
++              error = removexattr(path, name);
+               mnt_drop_write_file(f.file);
+       }
+       fdput(f);
+diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
+index 689f746..3e200fc 100644
+--- a/fs/xfs/kmem.h
++++ b/fs/xfs/kmem.h
+@@ -102,6 +102,14 @@ kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+       return kmem_cache_create(zone_name, size, 0, flags, construct);
+ }
++static inline kmem_zone_t *
++kmem_zone_init_flags_usercopy(int size, char *zone_name, unsigned long flags,
++                            size_t useroffset, size_t usersize,
++                            void (*construct)(void *))
++{
++      return kmem_cache_create_usercopy(zone_name, size, 0, flags, useroffset, usersize, construct);
++}
++
+ static inline void
+ kmem_zone_free(kmem_zone_t *zone, void *ptr)
+ {
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index b060bca..bfd7974 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -559,7 +559,7 @@ xfs_bmap_validate_ret(
+ #else
+ #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)               do { } while (0)
+-#define       xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define       xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)    do { } while (0)
+ #endif /* DEBUG */
+ /*
+diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
+index f2dc1a9..5677aa5 100644
+--- a/fs/xfs/libxfs/xfs_da_btree.c
++++ b/fs/xfs/libxfs/xfs_da_btree.c
+@@ -2011,6 +2011,7 @@ xfs_da_grow_inode_int(
+       struct xfs_inode        *dp = args->dp;
+       int                     w = args->whichfork;
+       xfs_rfsblock_t          nblks = dp->i_d.di_nblocks;
++      xfs_rfsblock_t          nblocks;
+       struct xfs_bmbt_irec    map, *mapp;
+       int                     nmap, error, got, i, mapi;
+@@ -2079,7 +2080,8 @@ xfs_da_grow_inode_int(
+       }
+       /* account for newly allocated blocks in reserved blocks total */
+-      args->total -= dp->i_d.di_nblocks - nblks;
++      nblocks = dp->i_d.di_nblocks - nblks;
++      args->total -= nblocks;
+ out_free_map:
+       if (mapp != &map)
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 96a70fd..de3d84c 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -121,7 +121,7 @@ xfs_find_handle(
+       }
+       error = -EFAULT;
+-      if (copy_to_user(hreq->ohandle, &handle, hsize) ||
++      if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
+           copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
+               goto out_put;
+@@ -1590,6 +1590,12 @@ xfs_ioc_swapext(
+               goto out_put_tmp_file;
+       }
++      if (f.file->f_op != &xfs_file_operations ||
++          tmp.file->f_op != &xfs_file_operations) {
++              error = -EINVAL;
++              goto out_put_tmp_file;
++      }
++
+       ip = XFS_I(file_inode(f.file));
+       tip = XFS_I(file_inode(tmp.file));
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index b8d64d5..3e87626 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -218,7 +218,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
+  * of the compiler which do not like us using do_div in the middle
+  * of large functions.
+  */
+-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
++static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
+ {
+       __u32   mod;
+@@ -274,7 +274,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+       return 0;
+ }
+ #else
+-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
++static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
+ {
+       __u32   mod;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index fd6be45..6be6542 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1761,9 +1761,11 @@ xfs_init_zones(void)
+               goto out_destroy_efd_zone;
+       xfs_inode_zone =
+-              kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+-                      KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
+-                      KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
++              kmem_zone_init_flags_usercopy(sizeof(xfs_inode_t), "xfs_inode",
++                      KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD | KM_ZONE_ACCOUNT,
++                      offsetof(xfs_inode_t, i_df.if_u2.if_inline_data),
++                      sizeof(((xfs_inode_t *)0)->i_df.if_u2.if_inline_data),
++                      xfs_fs_inode_init_once);
+       if (!xfs_inode_zone)
+               goto out_destroy_efi_zone;
+diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
+new file mode 100644
+index 0000000..821601d
+--- /dev/null
++++ b/grsecurity/Kconfig
+@@ -0,0 +1,1205 @@
++#
++# grecurity configuration
++#
++menu "Memory Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++      bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
++      default y if GRKERNSEC_CONFIG_AUTO
++      select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
++      help
++        If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++        be written to or read from to modify or leak the contents of the running
++        kernel.  /dev/port will also not be allowed to be opened, writing to
++        /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
++        If you have module support disabled, enabling this will close up several
++        ways that are currently used to insert malicious code into the running
++        kernel.
++
++        Even with this feature enabled, we still highly recommend that
++        you use the RBAC system, as it is still possible for an attacker to
++        modify the running kernel through other more obscure methods.
++
++        Enabling this feature will prevent the "cpupower" and "powertop" tools
++        from working and excludes debugfs from being compiled into the kernel.
++
++        It is highly recommended that you say Y here if you meet all the
++        conditions above.
++
++config GRKERNSEC_VM86
++      bool "Restrict VM86 mode"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++      depends on X86_32
++
++      help
++        If you say Y here, only processes with CAP_SYS_RAWIO will be able to
++        make use of a special execution mode on 32bit x86 processors called
++        Virtual 8086 (VM86) mode.  XFree86 may need vm86 mode for certain
++        video cards and will still work with this option enabled.  The purpose
++        of the option is to prevent exploitation of emulation errors in
++        virtualization of vm86 mode like the one discovered in VMWare in 2009.
++        Nearly all users should be able to enable this option.
++
++config GRKERNSEC_IO
++      bool "Disable privileged I/O"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++      depends on X86
++      select RTC_CLASS
++      select RTC_INTF_DEV
++      select RTC_DRV_CMOS
++
++      help
++        If you say Y here, all ioperm and iopl calls will return an error.
++        Ioperm and iopl can be used to modify the running kernel.
++        Unfortunately, some programs need this access to operate properly,
++        the most notable of which are XFree86 and hwclock.  hwclock can be
++        remedied by having RTC support in the kernel, so real-time 
++        clock support is enabled if this option is enabled, to ensure 
++        that hwclock operates correctly.  If hwclock still does not work,
++        either update udev or symlink /dev/rtc to /dev/rtc0.
++
++        If you're using XFree86 or a version of Xorg from 2012 or earlier,
++        you may not be able to boot into a graphical environment with this
++        option enabled.  In this case, you should use the RBAC system instead.
++
++config GRKERNSEC_BPF_HARDEN
++      bool "Harden BPF interpreter"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        Unlike previous versions of grsecurity that hardened both the BPF
++        interpreted code against corruption at rest as well as the JIT code
++        against JIT-spray attacks and attacker-controlled immediate values
++        for ROP, this feature will enforce disabling of the new eBPF JIT engine
++        and will ensure the interpreted code is read-only at rest.  This feature
++        may be removed at a later time when eBPF stabilizes to entirely revert
++        back to the more secure pre-3.16 BPF interpreter/JIT.
++
++        If you're using KERNEXEC, it's recommended that you enable this option
++        to supplement the hardening of the kernel.
++  
++config GRKERNSEC_PERF_HARDEN
++      bool "Disable unprivileged PERF_EVENTS usage by default"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PERF_EVENTS
++      help
++        If you say Y here, the range of acceptable values for the
++        /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
++        default to a new value: 3.  When the sysctl is set to this value, no
++        unprivileged use of the PERF_EVENTS syscall interface will be permitted.
++
++        Though PERF_EVENTS can be used legitimately for performance monitoring
++        and low-level application profiling, it is forced on regardless of
++        configuration, has been at fault for several vulnerabilities, and
++        creates new opportunities for side channels and other information leaks.
++
++        This feature puts PERF_EVENTS into a secure default state and permits
++        the administrator to change out of it temporarily if unprivileged
++        application profiling is needed.
++
++config GRKERNSEC_RAND_THREADSTACK
++      bool "Insert random gaps between thread stacks"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_RANDMMAP && !PPC
++      help
++        If you say Y here, a random-sized gap will be enforced between allocated
++        thread stacks.  Glibc's NPTL and other threading libraries that
++        pass MAP_STACK to the kernel for thread stack allocation are supported.
++        The implementation currently provides 8 bits of entropy for the gap.
++
++        Many distributions do not compile threaded remote services with the
++        -fstack-check argument to GCC, causing the variable-sized stack-based
++        allocator, alloca(), to not probe the stack on allocation.  This
++        permits an unbounded alloca() to skip over any guard page and potentially
++        modify another thread's stack reliably.  An enforced random gap
++        reduces the reliability of such an attack and increases the chance
++        that such a read/write to another thread's stack instead lands in
++        an unmapped area, causing a crash and triggering grsecurity's
++        anti-bruteforcing logic.
++
++config GRKERNSEC_PROC_MEMMAP
++      bool "Harden ASLR against information leaks and entropy reduction"
++      default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
++      depends on PAX_NOEXEC || PAX_ASLR
++      help
++        If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++        give no information about the addresses of its mappings if
++        PaX features that rely on random addresses are enabled on the task.
++        In addition to sanitizing this information and disabling other
++        dangerous sources of information, this option causes reads of sensitive
++        /proc/<pid> entries where the file descriptor was opened in a different
++        task than the one performing the read.  Such attempts are logged.
++        This option also limits argv/env strings for suid/sgid binaries
++        to 512KB to prevent a complete exhaustion of the stack entropy provided
++        by ASLR.  Finally, it places an 8MB stack resource limit on suid/sgid
++        binaries to prevent alternative mmap layouts from being abused.
++
++        If you use PaX it is essential that you say Y here as it closes up
++        several holes that make full ASLR useless locally.
++
++
++config GRKERNSEC_KSTACKOVERFLOW
++      bool "Prevent kernel stack overflows"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on X86_64
++      help
++        If you say Y here, the kernel's process stacks will be allocated
++        with vmalloc instead of the kernel's default allocator.  This
++        introduces guard pages that in combination with the alloca checking
++        of the STACKLEAK feature and removal of thread_info from the kernel
++        stack prevents all forms of kernel process stack overflow abuse.
++          Note that this is different from kernel stack buffer overflows.
++
++config GRKERNSEC_BRUTE
++      bool "Deter exploit bruteforcing"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, attempts to bruteforce exploits against forking
++        daemons such as apache or sshd, as well as against suid/sgid binaries
++        will be deterred.  When a child of a forking daemon is killed by PaX
++        or crashes due to an illegal instruction or other suspicious signal,
++        the parent process will be delayed 30 seconds upon every subsequent
++        fork until the administrator is able to assess the situation and
++        restart the daemon.
++        In the suid/sgid case, the attempt is logged, the user has all their
++        existing instances of the suid/sgid binary terminated and will
++        be unable to execute any suid/sgid binaries for 15 minutes.
++
++        It is recommended that you also enable signal logging in the auditing
++        section so that logs are generated when a process triggers a suspicious
++        signal.
++        If the sysctl option is enabled, a sysctl option with name
++        "deter_bruteforce" is created.
++
++config GRKERNSEC_MODHARDEN
++      bool "Harden module auto-loading"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on MODULES
++      help
++        If you say Y here, module auto-loading in response to use of some
++        feature implemented by an unloaded module will be restricted to
++        root users.  Enabling this option helps defend against attacks 
++        by unprivileged users who abuse the auto-loading behavior to 
++        cause a vulnerable module to load that is then exploited.
++
++        If this option prevents a legitimate use of auto-loading for a 
++        non-root user, the administrator can execute modprobe manually 
++        with the exact name of the module mentioned in the alert log.
++        Alternatively, the administrator can add the module to the list
++        of modules loaded at boot by modifying init scripts.
++
++        Modification of init scripts will most likely be needed on 
++        Ubuntu servers with encrypted home directory support enabled,
++        as the first non-root user logging in will cause the ecb(aes),
++        ecb(aes)-all, cbc(aes), and cbc(aes)-all  modules to be loaded.
++
++config GRKERNSEC_HIDESYM
++      bool "Hide kernel symbols"
++      default y if GRKERNSEC_CONFIG_AUTO
++      select PAX_USERCOPY_SLABS
++      help
++        If you say Y here, getting information on loaded modules, and
++        displaying all kernel symbols through a syscall will be restricted
++        to users with CAP_SYS_MODULE.  For software compatibility reasons,
++        /proc/kallsyms will be restricted to the root user.  The RBAC
++        system can hide that entry even from root.
++
++        This option also prevents leaking of kernel addresses through
++        several /proc entries.
++
++        Note that this option is only effective provided the following
++        conditions are met:
++        1) The kernel using grsecurity is not precompiled by some distribution
++        2) You have also enabled GRKERNSEC_DMESG
++        3) You are using the RBAC system and hiding other files such as your
++           kernel image and System.map.  Alternatively, enabling this option
++           causes the permissions on /boot, /lib/modules, and the kernel
++           source directory to change at compile time to prevent 
++           reading by non-root users.
++        If the above conditions are met, this option will aid in providing a
++        useful protection against local kernel exploitation of overflows
++        and arbitrary read/write vulnerabilities.
++
++        It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
++        in addition to this feature.
++
++config GRKERNSEC_RANDSTRUCT
++      bool "Randomize layout of sensitive kernel structures"
++      default y if GRKERNSEC_CONFIG_AUTO
++      select GRKERNSEC_HIDESYM
++      select MODVERSIONS if MODULES
++      help
++        If you say Y here, the layouts of a number of sensitive kernel
++        structures (task, fs, cred, etc) and all structures composed entirely
++        of function pointers (aka "ops" structs) will be randomized at compile-time.
++        This can introduce the requirement of an additional infoleak
++        vulnerability for exploits targeting these structure types.
++
++        Enabling this feature will introduce some performance impact, slightly
++        increase memory usage, and prevent the use of forensic tools like
++        Volatility against the system (unless the kernel source tree isn't
++        cleaned after kernel installation).
++
++        The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
++        It remains after a make clean to allow for external modules to be compiled
++        with the existing seed and will be removed by a make mrproper or
++        make distclean.
++
++          Note that the implementation requires gcc 4.6.4. or newer.  You may need
++        to install the supporting headers explicitly in addition to the normal
++        gcc package.
++
++config GRKERNSEC_RANDSTRUCT_PERFORMANCE
++      bool "Use cacheline-aware structure randomization"
++      depends on GRKERNSEC_RANDSTRUCT
++      default y if GRKERNSEC_CONFIG_PRIORITY_PERF
++      help
++        If you say Y here, the RANDSTRUCT randomization will make a best effort
++        at restricting randomization to cacheline-sized groups of elements.  It
++        will further not randomize bitfields in structures.  This reduces the
++        performance hit of RANDSTRUCT at the cost of weakened randomization.
++
++config GRKERNSEC_KERN_LOCKOUT
++      bool "Active kernel exploit response"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on X86 || ARM || PPC || SPARC
++      help
++        If you say Y here, when a PaX alert is triggered due to suspicious
++        activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
++        or an OOPS occurs due to bad memory accesses, instead of just
++        terminating the offending process (and potentially allowing
++        a subsequent exploit from the same user), we will take one of two
++        actions:
++         If the user was root, we will panic the system
++         If the user was non-root, we will log the attempt, terminate
++         all processes owned by the user, then prevent them from creating
++         any new processes until the system is restarted
++        This deters repeated kernel exploitation/bruteforcing attempts
++        and is useful for later forensics.
++
++config GRKERNSEC_OLD_ARM_USERLAND
++      bool "Old ARM userland compatibility"
++      depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
++      help
++        If you say Y here, stubs of executable code to perform such operations
++        as "compare-exchange" will be placed at fixed locations in the ARM vector
++        table.  This is unfortunately needed for old ARM userland meant to run
++        across a wide range of processors.  Without this option enabled,
++        the get_tls and data memory barrier stubs will be emulated by the kernel,
++        which is enough for Linaro userlands or other userlands designed for v6
++        and newer ARM CPUs.  It's recommended that you try without this option enabled
++        first, and only enable it if your userland does not boot (it will likely fail
++        at init time).
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_RBAC_DEBUG
++      bool
++
++config GRKERNSEC_NO_RBAC
++      bool "Disable RBAC system"
++      help
++        If you say Y here, the /dev/grsec device will be removed from the kernel,
++        preventing the RBAC system from being enabled.  You should only say Y
++        here if you have no intention of using the RBAC system, so as to prevent
++        an attacker with root access from misusing the RBAC system to hide files
++        and processes when loadable module support and /dev/[k]mem have been
++        locked down.
++
++config GRKERNSEC_ACL_HIDEKERN
++      bool "Hide kernel processes"
++      help
++        If you say Y here, all kernel threads will be hidden to all
++        processes but those whose subject has the "view hidden processes"
++        flag.
++
++config GRKERNSEC_ACL_MAXTRIES
++      int "Maximum tries before password lockout"
++      default 3
++      help
++        This option enforces the maximum number of times a user can attempt
++        to authorize themselves with the grsecurity RBAC system before being
++        denied the ability to attempt authorization again for a specified time.
++        The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++      int "Time to wait after max password tries, in seconds"
++      default 30
++      help
++        This option specifies the time the user must wait after attempting to
++        authorize to the RBAC system with the maximum number of invalid
++        passwords.  The higher the number, the harder it will be to brute-force
++        a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++      bool "Proc restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, the permissions of the /proc filesystem
++        will be altered to enhance system security and privacy.  You MUST
++        choose either a user only restriction or a user and group restriction.
++        Depending upon the option you choose, you can either restrict users to
++        see only the processes they themselves run, or choose a group that can
++        view all processes and files normally restricted to root if you choose
++        the "restrict to user only" option.  NOTE: If you're running identd or
++        ntpd as a non-root user, you will have to run it as the group you
++        specify here.
++
++config GRKERNSEC_PROC_USER
++      bool "Restrict /proc to user only"
++      depends on GRKERNSEC_PROC
++      help
++        If you say Y here, non-root users will only be able to view their own
++        processes, and restricts them from viewing network-related information,
++        and viewing kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++      bool "Allow special group"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++      help
++        If you say Y here, you will be able to select a group that will be
++        able to view all processes and network-related information.  If you've
++        enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
++        remain hidden.  This option is useful if you want to run identd as
++        a non-root user.  The group you select may also be chosen at boot time
++        via "grsec_proc_gid=" on the kernel commandline.
++
++config GRKERNSEC_PROC_GID
++      int "GID for special group"
++      depends on GRKERNSEC_PROC_USERGROUP
++      default 1001
++
++config GRKERNSEC_PROC_ADD
++      bool "Additional restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++      help
++        If you say Y here, additional restrictions will be placed on
++        /proc that keep normal users from viewing device information and 
++        slabinfo information that could be useful for exploits.
++
++config GRKERNSEC_LINK
++      bool "Linking restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, /tmp race exploits will be prevented, since users
++        will no longer be able to follow symlinks owned by other users in
++        world-writable +t directories (e.g. /tmp), unless the owner of the
++        symlink is the owner of the directory. users will also not be
++        able to hardlink to files they do not own.  If the sysctl option is
++        enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_SYMLINKOWN
++      bool "Kernel-enforced SymlinksIfOwnerMatch"
++      default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++      help
++        Apache's SymlinksIfOwnerMatch option has an inherent race condition
++        that prevents it from being used as a security feature.  As Apache
++        verifies the symlink by performing a stat() against the target of
++        the symlink before it is followed, an attacker can setup a symlink
++        to point to a same-owned file, then replace the symlink with one
++        that targets another user's file just after Apache "validates" the
++        symlink -- a classic TOCTOU race.  If you say Y here, a complete,
++        race-free replacement for Apache's "SymlinksIfOwnerMatch" option
++        will be in place for the group you specify. If the sysctl option
++        is enabled, a sysctl option with name "enforce_symlinksifowner" is
++        created.
++
++config GRKERNSEC_SYMLINKOWN_GID
++      int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
++      depends on GRKERNSEC_SYMLINKOWN
++      default 1006
++      help
++        Setting this GID determines what group kernel-enforced
++        SymlinksIfOwnerMatch will be enabled for.  If the sysctl option
++        is enabled, a sysctl option with name "symlinkown_gid" is created.
++
++config GRKERNSEC_FIFO
++      bool "FIFO restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, users will not be able to write to FIFOs they don't
++        own in world-writable +t directories (e.g. /tmp), unless the owner of
++        the FIFO is the same as the owner of the directory it's held in.  If the sysctl
++        option is enabled, a sysctl option with name "fifo_restrictions" is
++        created.
++
++config GRKERNSEC_SYSFS_RESTRICT
++      bool "Sysfs/debugfs restriction"
++      default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++      depends on SYSFS
++      help
++        If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
++        any filesystem normally mounted under it (e.g. debugfs) will be
++        mostly accessible only by root.  These filesystems generally provide access
++        to hardware and debug information that isn't appropriate for unprivileged
++        users of the system.  Sysfs and debugfs have also become a large source
++        of new vulnerabilities, ranging from infoleaks to local compromise.
++        There has been very little oversight with an eye toward security involved
++        in adding new exporters of information to these filesystems, so their
++        use is discouraged.
++        For reasons of compatibility, a few directories have been whitelisted
++        for access by non-root users:
++        /sys/fs/selinux
++        /sys/fs/fuse
++        /sys/devices/system/cpu
++
++config GRKERNSEC_ROFS
++      bool "Runtime read-only mount protection"
++      depends on SYSCTL
++      help
++        If you say Y here, a sysctl option with name "romount_protect" will
++        be created.  By setting this option to 1 at runtime, filesystems
++        will be protected in the following ways:
++        * No new writable mounts will be allowed
++        * Existing read-only mounts won't be able to be remounted read/write
++        * Write operations will be denied on all block devices
++        This option acts independently of grsec_lock: once it is set to 1,
++        it cannot be turned off.  Therefore, please be mindful of the resulting
++        behavior if this option is enabled in an init script on a read-only
++        filesystem.
++        Also be aware that as with other root-focused features, GRKERNSEC_KMEM
++        and GRKERNSEC_IO should be enabled and module loading disabled via
++        config or at runtime.
++        This feature is mainly intended for secure embedded systems.
++        
++
++config GRKERNSEC_DEVICE_SIDECHANNEL
++      bool "Eliminate stat/notify-based device sidechannels"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, timing analyses on block or character
++        devices like /dev/ptmx using stat or inotify/dnotify/fanotify
++        will be thwarted for unprivileged users.  If a process without
++        CAP_MKNOD stats such a device, the last access and last modify times
++        will match the device's create time.  No access or modify events
++        will be triggered through inotify/dnotify/fanotify for such devices.
++        This feature will prevent attacks that may at a minimum
++        allow an attacker to determine the administrator's password length.
++
++config GRKERNSEC_CHROOT
++      bool "Chroot jail restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, you will be able to choose several options that will
++        make breaking out of a chrooted jail much more difficult.  If you
++        encounter no software incompatibilities with the following options, it
++        is recommended that you enable each one.
++
++        Note that the chroot restrictions are not intended to apply to "chroots"
++        to directories that are simple bind mounts of the global root filesystem.
++        For several other reasons, a user shouldn't expect any significant
++        security by performing such a chroot.
++
++config GRKERNSEC_CHROOT_MOUNT
++      bool "Deny mounts"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to
++        mount or remount filesystems.  If the sysctl option is enabled, a
++        sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++      bool "Deny double-chroots"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to chroot
++        again outside the chroot.  This is a widely used method of breaking
++        out of a chroot jail and should not be allowed.  If the sysctl 
++        option is enabled, a sysctl option with name 
++        "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++      bool "Deny pivot_root in chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to use
++        a function called pivot_root() that was introduced in Linux 2.3.41.  It
++        works similar to chroot in that it changes the root filesystem.  This
++        function could be misused in a chrooted process to attempt to break out
++        of the chroot, and therefore should not be allowed.  If the sysctl
++        option is enabled, a sysctl option with name "chroot_deny_pivot" is
++        created.
++
++config GRKERNSEC_CHROOT_CHDIR
++      bool "Enforce chdir(\"/\") on all chroots"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, the current working directory of all newly-chrooted
++        applications will be set to the root directory of the chroot.
++        The man page on chroot(2) states:
++        Note that this call does not change  the  current  working
++        directory,  so  that `.' can be outside the tree rooted at
++        `/'.  In particular, the  super-user  can  escape  from  a
++        `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++        It is recommended that you say Y here, since it's not known to break
++        any software.  If the sysctl option is enabled, a sysctl option with
++        name "chroot_enforce_chdir" is created.
++
++config GRKERNSEC_CHROOT_CHMOD
++      bool "Deny (f)chmod +s"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to chmod
++        or fchmod files to make them have suid or sgid bits.  This protects
++        against another published method of breaking a chroot.  If the sysctl
++        option is enabled, a sysctl option with name "chroot_deny_chmod" is
++        created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++      bool "Deny fchdir and fhandle out of chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, a well-known method of breaking chroots by fchdir'ing
++        to a file descriptor of the chrooting process that points to a directory
++        outside the filesystem will be stopped.  This option also prevents use of
++        the recently-created syscall for opening files by a guessable "file handle"
++        inside a chroot, as well as accessing relative paths outside of a
++        directory passed in via file descriptor with openat and similar syscalls.
++        If the sysctl option is enabled, a sysctl option with name "chroot_deny_fchdir"
++        is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++      bool "Deny mknod"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be allowed to
++        mknod.  The problem with using mknod inside a chroot is that it
++        would allow an attacker to create a device entry that is the same
++        as one on the physical root of your system, which could range from
++        anything from the console device to a device for your harddrive (which
++        they could then use to wipe the drive or steal data).  It is recommended
++        that you say Y here, unless you run into software incompatibilities.
++        If the sysctl option is enabled, a sysctl option with name
++        "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++      bool "Deny shmat() out of chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to attach
++        to shared memory segments that were created outside of the chroot jail.
++        It is recommended that you say Y here.  If the sysctl option is enabled,
++        a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++      bool "Deny access to abstract AF_UNIX sockets out of chroot"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to
++        connect to abstract (meaning not belonging to a filesystem) Unix
++        domain sockets that were bound outside of a chroot.  It is recommended
++        that you say Y here.  If the sysctl option is enabled, a sysctl option
++        with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++      bool "Protect outside processes"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to
++        kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, 
++        getsid, or view any process outside of the chroot.  If the sysctl
++        option is enabled, a sysctl option with name "chroot_findtask" is
++        created.
++
++config GRKERNSEC_CHROOT_NICE
++      bool "Restrict priority changes"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, processes inside a chroot will not be able to raise
++        the priority of processes in the chroot, or alter the priority of
++        processes outside the chroot.  This provides more security than simply
++        removing CAP_SYS_NICE from the process' capability set.  If the
++        sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++        is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++      bool "Deny sysctl writes"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, an attacker in a chroot will not be able to
++        write to sysctl entries, either by sysctl(2) or through a /proc
++        interface.  It is strongly recommended that you say Y here. If the
++        sysctl option is enabled, a sysctl option with name
++        "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_RENAME
++      bool "Deny bad renames"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, an attacker in a chroot will not be able to
++        abuse the ability to create double chroots to break out of the
++        chroot by exploiting a race condition between a rename of a directory
++        within a chroot against an open of a symlink with relative path
++        components.  This feature will likewise prevent an accomplice outside
++        a chroot from enabling a user inside the chroot to break out and make
++        use of their credentials on the global filesystem.  Enabling this
++        feature is essential to prevent root users from breaking out of a
++        chroot. If the sysctl option is enabled, a sysctl option with name
++        "chroot_deny_bad_rename" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++      bool "Capability restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT
++      help
++        If you say Y here, the capabilities on all processes within a
++        chroot jail will be lowered to stop module insertion, raw i/o,
++        system and net admin tasks, rebooting the system, modifying immutable
++        files, modifying IPC owned by another, and changing the system time.
++        This is left an option because it can break some apps.  Disable this
++        if your chrooted apps are having problems performing those kinds of
++        tasks.  If the sysctl option is enabled, a sysctl option with
++        name "chroot_caps" is created.
++
++config GRKERNSEC_CHROOT_INITRD
++      bool "Exempt initrd tasks from restrictions"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
++      help
++        If you say Y here, tasks started prior to init will be exempted from
++        grsecurity's chroot restrictions.  This option is mainly meant to
++        resolve Plymouth's performing privileged operations unnecessarily
++        in a chroot.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++      bool "Single group for auditing"
++      help
++        If you say Y here, the exec and chdir logging features will only operate
++        on a group you specify.  This option is recommended if you only want to
++        watch certain users instead of having a large amount of logs from the
++        entire system.  If the sysctl option is enabled, a sysctl option with
++        name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++      int "GID for auditing"
++      depends on GRKERNSEC_AUDIT_GROUP
++      default 1007
++
++config GRKERNSEC_EXECLOG
++      bool "Exec logging"
++      help
++        If you say Y here, all execve() calls will be logged (since the
++        other exec*() calls are frontends to execve(), all execution
++        will be logged).  Useful for shell-servers that like to keep track
++        of their users.  If the sysctl option is enabled, a sysctl option with
++        name "exec_logging" is created.
++        WARNING: This option when enabled will produce a LOT of logs, especially
++        on an active system.
++
++config GRKERNSEC_RESLOG
++      bool "Resource logging"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, all attempts to overstep resource limits will
++        be logged with the resource name, the requested size, and the current
++        limit.  It is highly recommended that you say Y here.  If the sysctl
++        option is enabled, a sysctl option with name "resource_logging" is
++        created.  If the RBAC system is enabled, the sysctl value is ignored.
++
++config GRKERNSEC_CHROOT_EXECLOG
++      bool "Log execs within chroot"
++      help
++        If you say Y here, all executions inside a chroot jail will be logged
++        to syslog.  This can cause a large amount of logs if certain
++        applications (eg. djb's daemontools) are installed on the system, and
++        is therefore left as an option.  If the sysctl option is enabled, a
++        sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_PTRACE
++      bool "Ptrace logging"
++      help
++        If you say Y here, all attempts to attach to a process via ptrace
++        will be logged.  If the sysctl option is enabled, a sysctl option
++        with name "audit_ptrace" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++      bool "Chdir logging"
++      help
++        If you say Y here, all chdir() calls will be logged.  If the sysctl
++        option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++      bool "(Un)Mount logging"
++      help
++        If you say Y here, all mounts and unmounts will be logged.  If the
++        sysctl option is enabled, a sysctl option with name "audit_mount" is
++        created.
++
++config GRKERNSEC_SIGNAL
++      bool "Signal logging"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, certain important signals will be logged, such as
++        SIGSEGV, which will as a result inform you of when an error in a program
++        occurred, which in some cases could mean a possible exploit attempt.
++        If the sysctl option is enabled, a sysctl option with name
++        "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++      bool "Fork failure logging"
++      help
++        If you say Y here, all failed fork() attempts will be logged.
++        This could suggest a fork bomb, or someone attempting to overstep
++        their process limit.  If the sysctl option is enabled, a sysctl option
++        with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++      bool "Time change logging"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, any changes of the system clock will be logged.
++        If the sysctl option is enabled, a sysctl option with name
++        "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++      bool "/proc/<pid>/ipaddr support"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, a new entry will be added to each /proc/<pid>
++        directory that contains the IP address of the person using the task.
++        The IP is carried across local TCP and AF_UNIX stream sockets.
++        This information can be useful for IDS/IPSes to perform remote response
++        to a local attack.  The entry is readable by only the owner of the
++        process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
++        the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_RWXMAP_LOG
++      bool 'Denied RWX mmap/mprotect logging'
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
++      help
++        If you say Y here, calls to mmap() and mprotect() with explicit
++        usage of PROT_WRITE and PROT_EXEC together will be logged when
++        denied by the PAX_MPROTECT feature.  This feature will also
++        log other problematic scenarios that can occur when PAX_MPROTECT
++        is enabled on a binary, like textrels and PT_GNU_STACK.  If the 
++        sysctl option is enabled, a sysctl option with name "rwxmap_logging"
++        is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DMESG
++      bool "Dmesg(8) restriction"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, non-root users will not be able to use dmesg(8)
++        to view the contents of the kernel's circular log buffer.
++        The kernel's log buffer often contains kernel addresses and other
++        identifying information useful to an attacker in fingerprinting a
++        system for a targeted exploit.
++        If the sysctl option is enabled, a sysctl option with name "dmesg" is
++        created.
++
++config GRKERNSEC_HARDEN_PTRACE
++      bool "Deter ptrace-based process snooping"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, TTY sniffers and other malicious monitoring
++        programs implemented through ptrace will be defeated.  If you
++        have been using the RBAC system, this option has already been
++        enabled for several years for all users, with the ability to make
++        fine-grained exceptions.
++
++        This option only affects the ability of non-root users to ptrace
++        processes that are not a descendant of the ptracing process.
++        This means that strace ./binary and gdb ./binary will still work,
++        but attaching to arbitrary processes will not.  If the sysctl
++        option is enabled, a sysctl option with name "harden_ptrace" is
++        created.
++
++config GRKERNSEC_PTRACE_READEXEC
++      bool "Require read access to ptrace sensitive binaries"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, unprivileged users will not be able to ptrace unreadable
++        binaries.  This option is useful in environments that
++        remove the read bits (e.g. file mode 4711) from suid binaries to
++        prevent infoleaking of their contents.  This option adds
++        consistency to the use of that file mode, as the binary could normally
++        be read out when run without privileges while ptracing.
++
++        If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
++        is created.
++
++config GRKERNSEC_SETXID
++      bool "Enforce consistent multithreaded privileges"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on (X86 || SPARC64 || PPC || ARM || MIPS)
++      help
++        If you say Y here, a change from a root uid to a non-root uid
++        in a multithreaded application will cause the resulting uids,
++        gids, supplementary groups, and capabilities in that thread
++        to be propagated to the other threads of the process.  In most
++        cases this is unnecessary, as glibc will emulate this behavior
++        on behalf of the application.  Other libcs do not act in the
++        same way, allowing the other threads of the process to continue
++        running with root privileges.  If the sysctl option is enabled,
++        a sysctl option with name "consistent_setxid" is created.
++
++config GRKERNSEC_HARDEN_IPC
++      bool "Disallow access to overly-permissive IPC objects"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on SYSVIPC
++      help
++        If you say Y here, access to overly-permissive IPC objects (shared
++        memory, message queues, and semaphores) will be denied for processes
++        given the following criteria beyond normal permission checks:
++        1) If the IPC object is world-accessible and the euid doesn't match
++           that of the creator or current uid for the IPC object
++        2) If the IPC object is group-accessible and the egid doesn't
++           match that of the creator or current gid for the IPC object
++        It's a common error to grant too much permission to these objects,
++        with impact ranging from denial of service and information leaking to
++        privilege escalation.  This feature was developed in response to
++        research by Tim Brown:
++        http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
++        who found hundreds of such insecure usages.  Processes with
++        CAP_IPC_OWNER are still permitted to access these IPC objects.
++        If the sysctl option is enabled, a sysctl option with name
++        "harden_ipc" is created.
++
++config GRKERNSEC_HARDEN_TTY
++      bool "Disallow unprivileged use of command injection"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, the ability to use the TIOCSTI ioctl for
++        terminal command injection will be denied for unprivileged users.
++        There are very few legitimate uses for this functionality and it
++        has made vulnerabilities in several 'su'-like programs possible in
++        the past.  Even without these vulnerabilities, it provides an
++        attacker with an easy mechanism to move laterally among other
++        processes within the same user's compromised session.
++        By default, Linux allows unprivileged use of command injection as
++        long as the injection is being performed into the same tty session.
++        This feature makes that case the same as attempting to inject into
++        another session, making any TIOCSTI use require CAP_SYS_ADMIN.
++        If the sysctl option is enabled, a sysctl option with name
++        "harden_tty" is created.
++
++config GRKERNSEC_TPE
++      bool "Trusted Path Execution (TPE)"
++      default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++      help
++        If you say Y here, you will be able to choose a gid to add to the
++        supplementary groups of users you want to mark as "untrusted."
++        These users will not be able to execute any files that are not in
++        root-owned directories writable only by root.  If the sysctl option
++        is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++      bool "Partially restrict all non-root users"
++      depends on GRKERNSEC_TPE
++      help
++        If you say Y here, all non-root users will be covered under
++        a weaker TPE restriction.  This is separate from, and in addition to,
++        the main TPE options that you have selected elsewhere.  Thus, if a
++        "trusted" GID is chosen, this restriction applies to even that GID.
++        Under this restriction, all non-root users will only be allowed to
++        execute files in directories they own that are not group or
++        world-writable, or in directories owned by root and writable only by
++        root.  If the sysctl option is enabled, a sysctl option with name
++        "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_INVERT
++      bool "Invert GID option"
++      depends on GRKERNSEC_TPE
++      help
++        If you say Y here, the group you specify in the TPE configuration will
++        decide what group TPE restrictions will be *disabled* for.  This
++        option is useful if you want TPE restrictions to be applied to most
++        users on the system.  If the sysctl option is enabled, a sysctl option
++        with name "tpe_invert" is created.  Unlike other sysctl options, this
++        entry will default to on for backward-compatibility.
++
++config GRKERNSEC_TPE_GID
++      int
++      default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
++      default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
++      
++config GRKERNSEC_TPE_UNTRUSTED_GID
++      int "GID for TPE-untrusted users"
++      depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++      default 1005
++      help
++        Setting this GID determines what group TPE restrictions will be
++        *enabled* for.  If the sysctl option is enabled, a sysctl option
++        with name "tpe_gid" is created.
++
++config GRKERNSEC_TPE_TRUSTED_GID
++      int "GID for TPE-trusted users"
++      depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++      default 1005
++      help
++        Setting this GID determines what group TPE restrictions will be
++        *disabled* for.  If the sysctl option is enabled, a sysctl option
++        with name "tpe_gid" is created.
++
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_BLACKHOLE
++      bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on NET
++      help
++        If you say Y here, neither TCP resets nor ICMP
++        destination-unreachable packets will be sent in response to packets
++        sent to ports for which no associated listening process exists.
++        It will also prevent the sending of ICMP protocol unreachable packets
++        in response to packets with unknown protocols.
++        This feature supports both IPV4 and IPV6 and exempts the 
++        loopback interface from blackholing.  Enabling this feature 
++        makes a host more resilient to DoS attacks and reduces network
++        visibility against scanners.
++
++        The blackhole feature as-implemented is equivalent to the FreeBSD
++        blackhole feature, as it prevents RST responses to all packets, not
++        just SYNs.  Under most application behavior this causes no
++        problems, but applications (like haproxy) may not close certain
++        connections in a way that cleanly terminates them on the remote
++        end, leaving the remote host in LAST_ACK state.  Because of this
++        side-effect and to prevent intentional LAST_ACK DoSes, this
++        feature also adds automatic mitigation against such attacks.
++        The mitigation drastically reduces the amount of time a socket
++        can spend in LAST_ACK state.  If you're using haproxy and not
++        all servers it connects to have this option enabled, consider
++        disabling this feature on the haproxy host.
++
++        If the sysctl option is enabled, two sysctl options with names
++        "ip_blackhole" and "lastack_retries" will be created.
++        While "ip_blackhole" takes the standard zero/non-zero on/off
++        toggle, "lastack_retries" uses the same kinds of values as
++        "tcp_retries1" and "tcp_retries2".  The default value of 4
++        prevents a socket from lasting more than 45 seconds in LAST_ACK
++        state.
++
++config GRKERNSEC_NO_SIMULT_CONNECT
++      bool "Disable TCP Simultaneous Connect"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on NET
++      help
++        If you say Y here, a feature by Willy Tarreau will be enabled that
++        removes a weakness in Linux's strict implementation of TCP that
++        allows two clients to connect to each other without either entering
++        a listening state.  The weakness allows an attacker to easily prevent
++        a client from connecting to a known server provided the source port
++        for the connection is guessed correctly.
++
++        As the weakness could be used to prevent an antivirus or IPS from
++        fetching updates, or prevent an SSL gateway from fetching a CRL,
++        it should be eliminated by enabling this option.  Though Linux is
++        one of few operating systems supporting simultaneous connect, it
++        has no legitimate use in practice and is rarely supported by firewalls.
++      
++config GRKERNSEC_SOCKET
++      bool "Socket restrictions"
++      depends on NET
++      help
++        If you say Y here, you will be able to choose from several options.
++        If you assign a GID on your system and add it to the supplementary
++        groups of users you want to restrict socket access to, this patch
++        will perform up to three things, based on the option(s) you choose.
++
++config GRKERNSEC_SOCKET_ALL
++      bool "Deny any sockets to group"
++      depends on GRKERNSEC_SOCKET
++      help
++        If you say Y here, you will be able to choose a GID of whose users will
++        be unable to connect to other hosts from your machine or run server
++        applications from your machine.  If the sysctl option is enabled, a
++        sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++      int "GID to deny all sockets for"
++      depends on GRKERNSEC_SOCKET_ALL
++      default 1004
++      help
++        Here you can choose the GID to disable socket access for. Remember to
++        add the users you want socket access disabled for to the GID
++        specified here.  If the sysctl option is enabled, a sysctl option
++        with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++      bool "Deny client sockets to group"
++      depends on GRKERNSEC_SOCKET
++      help
++        If you say Y here, you will be able to choose a GID of whose users will
++        be unable to connect to other hosts from your machine, but will be
++        able to run servers.  If this option is enabled, all users in the group
++        you specify will have to use passive mode when initiating ftp transfers
++        from the shell on your machine.  If the sysctl option is enabled, a
++        sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++      int "GID to deny client sockets for"
++      depends on GRKERNSEC_SOCKET_CLIENT
++      default 1003
++      help
++        Here you can choose the GID to disable client socket access for.
++        Remember to add the users you want client socket access disabled for to
++        the GID specified here.  If the sysctl option is enabled, a sysctl
++        option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++      bool "Deny server sockets to group"
++      depends on GRKERNSEC_SOCKET
++      help
++        If you say Y here, you will be able to choose a GID of whose users will
++        be unable to run server applications from your machine.  If the sysctl
++        option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++      int "GID to deny server sockets for"
++      depends on GRKERNSEC_SOCKET_SERVER
++      default 1002
++      help
++        Here you can choose the GID to disable server socket access for.
++        Remember to add the users you want server socket access disabled for to
++        the GID specified here.  If the sysctl option is enabled, a sysctl
++        option with name "socket_server_gid" is created.
++
++endmenu
++
++menu "Physical Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DENYUSB
++      bool "Deny new USB connections after toggle"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on SYSCTL && USB_SUPPORT
++      help
++        If you say Y here, a new sysctl option with name "deny_new_usb"
++        will be created.  Setting its value to 1 will prevent any new
++        USB devices from being recognized by the OS.  Any attempted USB
++        device insertion will be logged.  This option is intended to be
++        used against custom USB devices designed to exploit vulnerabilities
++        in various USB device drivers.
++
++        For greatest effectiveness, this sysctl should be set after any
++        relevant init scripts.  This option is safe to enable in distros
++        as each user can choose whether or not to toggle the sysctl.
++
++config GRKERNSEC_DENYUSB_FORCE
++      bool "Reject all USB devices not connected at boot"
++      select USB
++      depends on GRKERNSEC_DENYUSB
++      help
++        If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
++        that doesn't involve a sysctl entry.  This option should only be
++        enabled if you're sure you want to deny all new USB connections
++        at runtime and don't want to modify init scripts.  This should not
++        be enabled by distros.  It forces the core USB code to be built
++        into the kernel image so that all devices connected at boot time
++        can be recognized and new USB device connections can be prevented
++        prior to init running.
++
++endmenu
++
++menu "Sysctl Support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++      bool "Sysctl support"
++      default y if GRKERNSEC_CONFIG_AUTO
++      help
++        If you say Y here, you will be able to change the options that
++        grsecurity runs with at bootup, without having to recompile your
++        kernel.  You can echo values to files in /proc/sys/kernel/grsecurity
++        to enable (1) or disable (0) various features.  All the sysctl entries
++        are mutable until the "grsec_lock" entry is set to a non-zero value.
++        All features enabled in the kernel configuration are disabled at boot
++        if you do not say Y to the "Turn on features by default" option.
++        All options should be set at startup, and the grsec_lock entry should
++        be set to a non-zero value after all the options are set.
++        *THIS IS EXTREMELY IMPORTANT*
++
++config GRKERNSEC_SYSCTL_DISTRO
++      bool "Extra sysctl support for distro makers (READ HELP)"
++      depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
++      help
++        If you say Y here, additional sysctl options will be created
++        for features that affect processes running as root.  Therefore,
++        it is critical when using this option that the grsec_lock entry be
++        enabled after boot.  Only distros with prebuilt kernel packages
++        with this option enabled that can ensure grsec_lock is enabled
++        after boot should use this option.
++        *Failure to set grsec_lock after boot makes all grsec features
++        this option covers useless*
++
++        Currently this option creates the following sysctl entries:
++        "Disable Privileged I/O": "disable_priv_io"   
++
++config GRKERNSEC_SYSCTL_ON
++      bool "Turn on features by default"
++      default y if GRKERNSEC_CONFIG_AUTO
++      depends on GRKERNSEC_SYSCTL
++      help
++        If you say Y here, instead of having all features enabled in the
++        kernel configuration disabled at boot time, the features will be
++        enabled at boot time.  It is recommended you say Y here unless
++        there is some reason you would want all sysctl-tunable features to
++        be disabled by default.  As mentioned elsewhere, it is important
++        to enable the grsec_lock entry once you have finished modifying
++        the sysctl entries.
++
++endmenu
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++      int "Seconds in between log messages (minimum)"
++      default 10
++      help
++        This option allows you to enforce the number of seconds between
++        grsecurity log messages.  The default should be suitable for most
++        people, however, if you choose to change it, choose a value small enough
++        to allow informative logs to be produced, but large enough to
++        prevent flooding.
++
++        Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
++        any rate limiting on grsecurity log messages.
++
++config GRKERNSEC_FLOODBURST
++      int "Number of messages in a burst (maximum)"
++      default 6
++      help
++        This option allows you to choose the maximum number of messages allowed
++        within the flood time interval you chose in a separate option.  The
++        default should be suitable for most people, however if you find that
++        many of your logs are being interpreted as flooding, you may want to
++        raise this value.
++
++        Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
++        any rate limiting on grsecurity log messages.
++
++endmenu
+diff --git a/grsecurity/Makefile b/grsecurity/Makefile
+new file mode 100644
+index 0000000..e136e5f
+--- /dev/null
++++ b/grsecurity/Makefile
+@@ -0,0 +1,54 @@
++# grsecurity - access control and security hardening for Linux
++# All code in this directory and various hooks located throughout the Linux kernel are
++# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
++# http://www.grsecurity.net spender@grsecurity.net
++#
++# This program is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License version 2
++# as published by the Free Software Foundation.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
++
++KBUILD_CFLAGS += -Werror
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++      grsec_mount.o grsec_sig.o grsec_sysctl.o \
++      grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
++      grsec_usb.o grsec_ipc.o grsec_proc.o grsec_tty.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
++      gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++      gracl_learn.o grsec_log.o gracl_policy.o
++ifdef CONFIG_COMPAT
++obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
++endif
++
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifdef CONFIG_NET
++obj-y += grsec_sock.o
++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
++endif
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
++ifdef CONFIG_GRKERNSEC_HIDESYM
++extra-y := grsec_hidesym.o
++$(obj)/grsec_hidesym.o:
++      @-chmod -f 500 /boot
++      @-chmod -f 500 /lib/modules
++      @-chmod -f 500 /lib64/modules
++      @-chmod -f 500 /lib32/modules
++      @-chmod -f 700 .
++      @-chmod -f 700 $(objtree)
++      @echo '  grsec: protected kernel image paths'
++endif
+diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
+new file mode 100644
+index 0000000..1bbe70e
+--- /dev/null
++++ b/grsecurity/gracl.c
+@@ -0,0 +1,2773 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/lglock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/security.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/stop_machine.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++#include <linux/lglock.h>
++#include <linux/hugetlb.h>
++#include <linux/posix-timers.h>
++#include <linux/prefetch.h>
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++#include <linux/magic.h>
++#include <linux/pagemap.h>
++#include "../fs/btrfs/async-thread.h"
++#include "../fs/btrfs/ctree.h"
++#include "../fs/btrfs/btrfs_inode.h"
++#endif
++#include "../fs/mount.h"
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++#define FOR_EACH_ROLE_START(role) \
++      role = running_polstate.role_list; \
++      while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++              role = role->prev; \
++      }
++
++extern struct path gr_real_root;
++
++static struct gr_policy_state running_polstate;
++struct gr_policy_state *polstate = &running_polstate;
++extern struct gr_alloc_state *current_alloc_state;
++
++extern char *gr_shared_page[4];
++DEFINE_RWLOCK(gr_inode_lock);
++
++static unsigned int gr_status __read_only = GR_STATUS_INIT;
++
++#ifdef CONFIG_NET
++extern struct vfsmount *sock_mnt;
++#endif
++
++extern struct vfsmount *pipe_mnt;
++extern struct vfsmount *shm_mnt;
++
++#ifdef CONFIG_HUGETLBFS
++extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
++#endif
++
++extern u16 acl_sp_role_value;
++extern struct acl_object_label *fakefs_obj_rw;
++extern struct acl_object_label *fakefs_obj_rwx;
++
++int gr_acl_is_enabled(void)
++{
++      return (gr_status & GR_READY);
++}
++
++void gr_enable_rbac_system(void)
++{
++      pax_open_kernel();
++      gr_status |= GR_READY;
++      pax_close_kernel();
++}
++
++int gr_rbac_disable(void *unused)
++{
++      pax_open_kernel();
++      gr_status &= ~GR_READY;
++      pax_close_kernel();
++
++      return 0;
++}
++
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++      struct dentry *ldentry = d_backing_dentry((struct dentry *)dentry);
++
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++      if (ldentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
++              return BTRFS_I(d_inode(ldentry))->root->anon_dev;
++      else
++#endif
++              return d_inode(ldentry)->i_sb->s_dev;
++}
++
++static inline u64 __get_ino(const struct dentry *dentry)
++{
++      struct dentry *ldentry = d_backing_dentry((struct dentry *)dentry);
++
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++      if (ldentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
++              return btrfs_ino(d_inode(dentry));
++      else
++#endif
++              return d_inode(ldentry)->i_ino;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++      return __get_dev(dentry);
++}
++
++u64 gr_get_ino_from_dentry(struct dentry *dentry)
++{
++      return __get_ino(dentry);
++}
++
++static char gr_task_roletype_to_char(struct task_struct *task)
++{
++      switch (task->role->roletype &
++              (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
++               GR_ROLE_SPECIAL)) {
++      case GR_ROLE_DEFAULT:
++              return 'D';
++      case GR_ROLE_USER:
++              return 'U';
++      case GR_ROLE_GROUP:
++              return 'G';
++      case GR_ROLE_SPECIAL:
++              return 'S';
++      }
++
++      return 'X';
++}
++
++char gr_roletype_to_char(void)
++{
++      return gr_task_roletype_to_char(current);
++}
++
++int
++gr_acl_tpe_check(void)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++      if (current->role->roletype & GR_ROLE_TPE)
++              return 1;
++      else
++              return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
++          grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++          !capable(CAP_SYS_RAWIO))
++              return 1;
++#endif
++      return 0;
++}
++
++int
++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
++{
++      if (likely(lena != lenb))
++              return 0;
++
++      return !memcmp(a, b, lena);
++}
++
++static int prepend(char **buffer, int *buflen, const char *str, int namelen)
++{
++      *buflen -= namelen;
++      if (*buflen < 0)
++              return -ENAMETOOLONG;
++      *buffer -= namelen;
++      memcpy(*buffer, str, namelen);
++      return 0;
++}
++
++static int prepend_name(char **buffer, int *buflen, struct qstr *name)
++{
++      return prepend(buffer, buflen, (const char *)name->name, name->len);
++}
++
++static int prepend_path(const struct path *path, struct path *root,
++                      char **buffer, int *buflen)
++{
++      struct dentry *dentry = path->dentry;
++      struct vfsmount *vfsmnt = path->mnt;
++      struct mount *mnt = real_mount(vfsmnt);
++      bool slash = false;
++      int error = 0;
++
++      while (dentry != root->dentry || vfsmnt != root->mnt) {
++              struct dentry * parent;
++
++              if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++                      /* Global root? */
++                      if (!mnt_has_parent(mnt)) {
++                              goto out;
++                      }
++                      dentry = mnt->mnt_mountpoint;
++                      mnt = mnt->mnt_parent;
++                      vfsmnt = &mnt->mnt;
++                      continue;
++              }
++              parent = dentry->d_parent;
++              prefetch(parent);
++              spin_lock(&dentry->d_lock);
++              error = prepend_name(buffer, buflen, &dentry->d_name);
++              spin_unlock(&dentry->d_lock);
++              if (!error)
++                      error = prepend(buffer, buflen, "/", 1);
++              if (error)
++                      break;
++
++              slash = true;
++              dentry = parent;
++      }
++
++out:
++      if (!error && !slash)
++              error = prepend(buffer, buflen, "/", 1);
++
++      return error;
++}
++
++/* this must be called with mount_lock and rename_lock held */
++
++static char *__our_d_path(const struct path *path, struct path *root,
++                      char *buf, int buflen)
++{
++      char *res = buf + buflen;
++      int error;
++
++      prepend(&res, &buflen, "\0", 1);
++      error = prepend_path(path, root, &res, &buflen);
++      if (error)
++              return ERR_PTR(error);
++
++      return res;
++}
++
++static char *
++gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
++{
++      char *retval;
++
++      retval = __our_d_path(path, root, buf, buflen);
++      if (unlikely(IS_ERR(retval)))
++              retval = strcpy(buf, "<path too long>");
++      else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
++              retval[1] = '\0';
++
++      return retval;
++}
++
++static char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++              char *buf, int buflen)
++{
++      struct path path;
++      char *res;
++
++      path.dentry = (struct dentry *)dentry;
++      path.mnt = (struct vfsmount *)vfsmnt;
++
++      /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
++         by the RBAC system */
++      res = gen_full_path(&path, &gr_real_root, buf, buflen);
++
++      return res;
++}
++
++static char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++          char *buf, int buflen)
++{
++      char *res;
++      struct path path;
++      struct path root;
++      struct task_struct *reaper = init_pid_ns.child_reaper;
++
++      path.dentry = (struct dentry *)dentry;
++      path.mnt = (struct vfsmount *)vfsmnt;
++
++      /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
++      get_fs_root(reaper->fs, &root);
++
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++      res = gen_full_path(&path, &root, buf, buflen);
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++
++      path_put(&root);
++      return res;
++}
++
++char *
++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      char *ret;
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++      ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++                           PAGE_SIZE);
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++      return ret;
++}
++
++static char *
++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      char *ret;
++      char *buf;
++      int buflen;
++
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++      buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++      ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
++      buflen = (int)(ret - buf);
++      if (buflen >= 5)
++              prepend(&ret, &buflen, "/proc", 5);
++      else
++              ret = strcpy(buf, "<path too long>");
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++      return ret;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++                           PAGE_SIZE);
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++                         PAGE_SIZE);
++}
++
++__u32
++to_gr_audit(const __u32 reqmode)
++{
++      /* masks off auditable permission flags, then shifts them to create
++         auditing flags, and adds the special case of append auditing if
++         we're requesting write */
++      return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
++}
++
++struct acl_role_label *
++__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
++                    const gid_t gid)
++{
++      unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
++      struct acl_role_label *match;
++      struct role_allowed_ip *ipp;
++      unsigned int x;
++      u32 curr_ip = task->signal->saved_ip;
++
++      match = state->acl_role_set.r_hash[index];
++
++      while (match) {
++              if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
++                      for (x = 0; x < match->domain_child_num; x++) {
++                              if (match->domain_children[x] == uid)
++                                      goto found;
++                      }
++              } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
++                      break;
++              match = match->next;
++      }
++found:
++      if (match == NULL) {
++            try_group:
++              index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
++              match = state->acl_role_set.r_hash[index];
++
++              while (match) {
++                      if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
++                              for (x = 0; x < match->domain_child_num; x++) {
++                                      if (match->domain_children[x] == gid)
++                                              goto found2;
++                              }
++                      } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
++                              break;
++                      match = match->next;
++              }
++found2:
++              if (match == NULL)
++                      match = state->default_role;
++              if (match->allowed_ips == NULL)
++                      return match;
++              else {
++                      for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++                              if (likely
++                                  ((ntohl(curr_ip) & ipp->netmask) ==
++                                   (ntohl(ipp->addr) & ipp->netmask)))
++                                      return match;
++                      }
++                      match = state->default_role;
++              }
++      } else if (match->allowed_ips == NULL) {
++              return match;
++      } else {
++              for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++                      if (likely
++                          ((ntohl(curr_ip) & ipp->netmask) ==
++                           (ntohl(ipp->addr) & ipp->netmask)))
++                              return match;
++              }
++              goto try_group;
++      }
++
++      return match;
++}
++
++static struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++                    const gid_t gid)
++{
++      return __lookup_acl_role_label(&running_polstate, task, uid, gid);
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label(const u64 ino, const dev_t dev,
++                    const struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
++      struct acl_subject_label *match;
++
++      match = role->subj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             (match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && !(match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
++                        const struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
++      struct acl_subject_label *match;
++
++      match = role->subj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             !(match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && (match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++/* Find the live (non-deleted) object label for (ino, dev) in the subject's
++ * object hash; NULL if only deleted or no entries exist.
++ */
++static struct acl_object_label *
++lookup_acl_obj_label(const u64 ino, const dev_t dev,
++                   const struct acl_subject_label *subj)
++{
++      unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
++      struct acl_object_label *match;
++
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             (match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && !(match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++/* Object-label lookup used on the create path: a GR_DELETED entry is
++ * preferred (the name being created matches a previously deleted object,
++ * so its old policy applies); otherwise fall back to a live entry.
++ */
++static struct acl_object_label *
++lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
++                   const struct acl_subject_label *subj)
++{
++      unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
++      struct acl_object_label *match;
++
++      /* first pass: look for a deleted entry */
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             !(match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && (match->mode & GR_DELETED))
++              return match;
++
++      /* second pass: fall back to a live entry */
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != ino || match->device != dev ||
++             (match->mode & GR_DELETED))) {
++              match = match->next;
++      }
++
++      if (match && !(match->mode & GR_DELETED))
++              return match;
++      else
++              return NULL;
++}
++
++/* Look up a pathname in the given policy state's name set.  Hash key is
++ * computed with full_name_hash(); buckets are compared by key first,
++ * then by exact (length-aware) string equality via gr_streq().
++ */
++struct name_entry *
++__lookup_name_entry(const struct gr_policy_state *state, const char *name)
++{
++      unsigned int len = strlen(name);
++      unsigned int key = full_name_hash(NULL, (const unsigned char *)name, len);
++      unsigned int index = key % state->name_set.n_size;
++      struct name_entry *match;
++
++      match = state->name_set.n_hash[index];
++
++      while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
++              match = match->next;
++
++      return match;
++}
++
++/* Name lookup against the live (running) policy state. */
++static struct name_entry *
++lookup_name_entry(const char *name)
++{
++      return __lookup_name_entry(&running_polstate, name);
++}
++
++/* Name lookup used on the create path: prefer an entry marked deleted
++ * (the name being created was previously removed, so its old policy
++ * applies), then fall back to a non-deleted entry.  NULL if neither.
++ */
++static struct name_entry *
++lookup_name_entry_create(const char *name)
++{
++      unsigned int len = strlen(name);
++      unsigned int key = full_name_hash(NULL, (const unsigned char *)name, len);
++      unsigned int index = key % running_polstate.name_set.n_size;
++      struct name_entry *match;
++
++      /* first pass: deleted entries only */
++      match = running_polstate.name_set.n_hash[index];
++
++      while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++                       !match->deleted))
++              match = match->next;
++
++      if (match && match->deleted)
++              return match;
++
++      /* second pass: live entries only */
++      match = running_polstate.name_set.n_hash[index];
++
++      while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++                       match->deleted))
++              match = match->next;
++
++      if (match && !match->deleted)
++              return match;
++      else
++              return NULL;
++}
++
++/* Find the inode/device entry for (ino, dev) in the live policy's
++ * inodev set; NULL if absent.
++ */
++static struct inodev_entry *
++lookup_inodev_entry(const u64 ino, const dev_t dev)
++{
++      unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
++      struct inodev_entry *match;
++
++      match = running_polstate.inodev_set.i_hash[index];
++
++      while (match && (match->nentry->inode != ino || match->nentry->device != dev))
++              match = match->next;
++
++      return match;
++}
++
++/* Push 'entry' onto the head of its doubly-linked hash bucket in the
++ * given policy state's inodev set.
++ */
++void
++__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
++{
++      unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
++                                  state->inodev_set.i_size);
++      struct inodev_entry **curr;
++
++      entry->prev = NULL;
++
++      curr = &state->inodev_set.i_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = entry;
++      
++      entry->next = *curr;
++      *curr = entry;
++
++      return;
++}
++
++/* Insert into the live (running) policy state's inodev set. */
++static void
++insert_inodev_entry(struct inodev_entry *entry)
++{
++      __insert_inodev_entry(&running_polstate, entry);
++}
++
++/* Push 'obj' onto the head of its doubly-linked bucket in the subject's
++ * object hash (keyed by the object's inode/device).
++ */
++void
++insert_acl_obj_label(struct acl_object_label *obj,
++                   struct acl_subject_label *subj)
++{
++      unsigned int index =
++          gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
++      struct acl_object_label **curr;
++
++      obj->prev = NULL;
++
++      curr = &subj->obj_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = obj;
++
++      obj->next = *curr;
++      *curr = obj;
++
++      return;
++}
++
++/* Push 'obj' (a subject label) onto the head of its doubly-linked bucket
++ * in the role's subject hash (keyed by the subject's inode/device).
++ */
++void
++insert_acl_subj_label(struct acl_subject_label *obj,
++                    struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
++      struct acl_subject_label **curr;
++
++      obj->prev = NULL;
++
++      curr = &role->subj_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = obj;
++
++      obj->next = *curr;
++      *curr = obj;
++
++      return;
++}
++
++/* derived from glibc fnmatch(); returns 0 on match, 1 on no match.
++ * Differences from fnmatch(): wildcards never match '/', and '\\' in the
++ * pattern matches a literal backslash (no escape processing).
++ * 'p' is the pattern, 'n' is the name being tested.
++ */
++
++static int
++glob_match(const char *p, const char *n)
++{
++      char c;
++
++      while ((c = *p++) != '\0') {
++      switch (c) {
++              /* '?': any single character except '/' or end of name */
++              case '?':
++                      if (*n == '\0')
++                              return 1;
++                      else if (*n == '/')
++                              return 1;
++                      break;
++              /* '\\': matches only a literal backslash in the name */
++              case '\\':
++                      if (*n != c)
++                              return 1;
++                      break;
++              case '*':
++                      /* collapse consecutive '*'/'?'; each '?' consumes one
++                         name character (never '/') */
++                      for (c = *p++; c == '?' || c == '*'; c = *p++) {
++                              if (*n == '/')
++                                      return 1;
++                              else if (c == '?') {
++                                      if (*n == '\0')
++                                              return 1;
++                                      else
++                                              ++n;
++                              }
++                      }
++                      /* pattern ends in '*': matches the rest of this component */
++                      if (c == '\0') {
++                              return 0;
++                      } else {
++                              const char *endp;
++
++                              /* '*' may not match past the next '/' */
++                              if ((endp = strchr(n, '/')) == NULL)
++                                      endp = n + strlen(n);
++
++                              if (c == '[') {
++                                      /* retry the bracket expression at every
++                                         position within this component */
++                                      for (--p; n < endp; ++n)
++                                              if (!glob_match(p, n))
++                                                      return 0;
++                              } else if (c == '/') {
++                                      /* skip to the end of this component and
++                                         continue matching after the '/' */
++                                      while (*n != '\0' && *n != '/')
++                                              ++n;
++                                      if (*n == '/' && !glob_match(p, n + 1))
++                                              return 0;
++                              } else {
++                                      /* anchor on the literal char after '*' */
++                                      for (--p; n < endp; ++n)
++                                              if (*n == c && !glob_match(p, n))
++                                                      return 0;
++                              }
++
++                              return 1;
++                      }
++              /* '[...]': character class, with optional leading '!' or '^'
++                 negation and 'a-z' ranges */
++              case '[':
++                      {
++                      int not;
++                      char cold;
++
++                      if (*n == '\0' || *n == '/')
++                              return 1;
++
++                      not = (*p == '!' || *p == '^');
++                      if (not)
++                              ++p;
++
++                      c = *p++;
++                      for (;;) {
++                              unsigned char fn = (unsigned char)*n;
++
++                              /* unterminated class: no match */
++                              if (c == '\0')
++                                      return 1;
++                              else {
++                                      if (c == fn)
++                                              goto matched;
++                                      cold = c;
++                                      c = *p++;
++
++                                      /* 'cold-cend' range test */
++                                      if (c == '-' && *p != ']') {
++                                              unsigned char cend = *p++;
++
++                                              if (cend == '\0')
++                                                      return 1;
++
++                                              if (cold <= fn && fn <= cend)
++                                                      goto matched;
++
++                                              c = *p++;
++                                      }
++                              }
++
++                              if (c == ']')
++                                      break;
++                      }
++                      if (!not)
++                              return 1;
++                      break;
++              matched:
++                      /* class member matched: skip the rest of the class */
++                      while (c != ']') {
++                              if (c == '\0')
++                                      return 1;
++
++                              c = *p++;
++                      }
++                      if (not)
++                              return 1;
++              }
++              break;
++      default:
++              /* literal character must match exactly */
++              if (c != *n)
++                      return 1;
++      }
++
++      ++n;
++      }
++
++      /* pattern exhausted: name must be exhausted too, or end a component */
++      if (*n == '\0')
++              return 0;
++
++      if (*n == '/')
++              return 0;
++
++      return 1;
++}
++
++/* Walk the chain of globbed object labels and return the first whose
++ * pattern matches the full path of (dentry, mnt).  *path is resolved
++ * lazily on first use and cached for the caller.  NULL if no glob matches.
++ */
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++      const struct dentry *dentry, const struct vfsmount *mnt, char **path)
++{
++      struct acl_object_label *tmp;
++
++      if (*path == NULL)
++              *path = gr_to_filename_nolock(dentry, mnt);
++
++      tmp = globbed;
++
++      while (tmp) {
++              if (!glob_match(tmp->filename, *path))
++                      return tmp;
++              tmp = tmp->next;
++      }
++
++      return NULL;
++}
++
++/* Look up the object label for (curr_ino, curr_dev), walking up the
++ * subject inheritance chain (parent_subject) until a match is found.
++ * If the match carries glob rules and checkglob is set, a matching glob
++ * entry overrides the base label.  Runs under gr_inode_lock (read).
++ */
++static struct acl_object_label *
++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++          const u64 curr_ino, const dev_t curr_dev,
++          const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++      struct acl_subject_label *tmpsubj;
++      struct acl_object_label *retval;
++      struct acl_object_label *retval2;
++
++      tmpsubj = (struct acl_subject_label *) subj;
++      read_lock(&gr_inode_lock);
++      do {
++              retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
++              if (retval) {
++                      if (checkglob && retval->globbed) {
++                              retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
++                              if (retval2)
++                                      retval = retval2;
++                      }
++                      break;
++              }
++      } while ((tmpsubj = tmpsubj->parent_subject));
++      read_unlock(&gr_inode_lock);
++
++      return retval;
++}
++
++/* Resolve curr_dentry's inode/device under d_lock and delegate to
++ * __full_lookup().  Glob checking is suppressed when we are still at the
++ * original path itself (see comment below) unless this is a create lookup.
++ */
++static struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++          struct dentry *curr_dentry,
++          const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++      int newglob = checkglob;
++      u64 inode;
++      dev_t device;
++
++      /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
++         as we don't want a / * rule to match instead of the / object
++         don't do this for create lookups that call this function though, since they're looking up
++         on the parent and thus need globbing checks on all paths
++      */
++      if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
++              newglob = GR_NO_GLOB;
++
++      spin_lock(&curr_dentry->d_lock);
++      inode = __get_ino(curr_dentry);
++      device = __get_dev(curr_dentry);
++      spin_unlock(&curr_dentry->d_lock);
++
++      return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
++}
++
++#ifdef CONFIG_HUGETLBFS
++/* True if 'mnt' is one of the internal hugetlbfs mounts (one per hstate). */
++static inline bool
++is_hugetlbfs_mnt(const struct vfsmount *mnt)
++{
++      int i;
++      for (i = 0; i < HUGE_MAX_HSTATE; i++) {
++              if (unlikely(hugetlbfs_vfsmount[i] == mnt))
++                      return true;
++      }
++
++      return false;
++}
++#endif
++
++/* Resolve the object label governing (l_dentry, l_mnt) for 'subj'.
++ * Walks the path upward toward gr_real_root, crossing mount points,
++ * trying a label lookup at each ancestor; falls back to the real root's
++ * label.  Internal kernel mounts (shm/pipe/socket/hugetlbfs) and private
++ * inodes get a fake rw/rwx label.  Takes mount_lock (excl read) then
++ * rename_lock to keep the path stable during the walk.  Never returns
++ * NULL (BUG_ON otherwise — a policy must always supply a / object).
++ */
++static struct acl_object_label *
++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++            const struct acl_subject_label *subj, char *path, const int checkglob)
++{
++      struct dentry *dentry = (struct dentry *) l_dentry;
++      struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++      struct inode * inode = d_backing_inode(dentry);
++      struct mount *real_mnt = real_mount(mnt);
++      struct acl_object_label *retval;
++      struct dentry *parent;
++
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++
++      if (unlikely((mnt == shm_mnt && inode->i_nlink == 0) || mnt == pipe_mnt ||
++#ifdef CONFIG_NET
++          mnt == sock_mnt ||
++#endif
++#ifdef CONFIG_HUGETLBFS
++          (is_hugetlbfs_mnt(mnt) && inode->i_nlink == 0) ||
++#endif
++              /* ignore Eric Biederman */
++          IS_PRIVATE(inode))) {
++              retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
++              goto out;
++      }
++
++      for (;;) {
++              /* stop once we have reached the RBAC system's real root */
++              if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
++                      break;
++
++              if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++                      if (!mnt_has_parent(real_mnt))
++                              break;
++
++                      retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++                      if (retval != NULL)
++                              goto out;
++
++                      /* cross the mount point into the parent mount */
++                      dentry = real_mnt->mnt_mountpoint;
++                      real_mnt = real_mnt->mnt_parent;
++                      mnt = &real_mnt->mnt;
++                      continue;
++              }
++
++              parent = dentry->d_parent;
++              retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++              if (retval != NULL)
++                      goto out;
++
++              dentry = parent;
++      }
++
++      retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++
++      /* gr_real_root is pinned so we don't have to hold a reference */
++      if (retval == NULL)
++              retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
++out:
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++
++      BUG_ON(retval == NULL);
++
++      return retval;
++}
++
++/* Object-label lookup with normal glob matching (GR_REG_GLOB). */
++static struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++            const struct acl_subject_label *subj)
++{
++      char *path = NULL;
++      return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
++}
++
++/* Object-label lookup with glob matching disabled (GR_NO_GLOB). */
++static struct acl_object_label *
++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++            const struct acl_subject_label *subj)
++{
++      char *path = NULL;
++      return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
++}
++
++/* Object-label lookup for the create path (GR_CREATE_GLOB): the caller
++ * supplies the already-resolved path of the name being created.
++ */
++static struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++                   const struct acl_subject_label *subj, char *path)
++{
++      return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
++}
++
++/* Resolve the subject label governing (l_dentry, l_mnt) within 'role'.
++ * Mirrors __chk_obj_label(): walks the path upward across mount points,
++ * doing a subject lookup (under d_lock + gr_inode_lock) at each ancestor,
++ * finally falling back to gr_real_root's subject.  Never returns NULL
++ * (BUG_ON otherwise — every role must define a / subject).
++ */
++struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++             const struct acl_role_label *role)
++{
++      struct dentry *dentry = (struct dentry *) l_dentry;
++      struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++      struct mount *real_mnt = real_mount(mnt);
++      struct acl_subject_label *retval;
++      struct dentry *parent;
++
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++
++      for (;;) {
++              if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
++                      break;
++              if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++                      if (!mnt_has_parent(real_mnt))
++                              break;
++
++                      spin_lock(&dentry->d_lock);
++                      read_lock(&gr_inode_lock);
++                      retval =
++                              lookup_acl_subj_label(__get_ino(dentry),
++                                              __get_dev(dentry), role);
++                      read_unlock(&gr_inode_lock);
++                      spin_unlock(&dentry->d_lock);
++                      if (retval != NULL)
++                              goto out;
++
++                      /* cross the mount point into the parent mount */
++                      dentry = real_mnt->mnt_mountpoint;
++                      real_mnt = real_mnt->mnt_parent;
++                      mnt = &real_mnt->mnt;
++                      continue;
++              }
++
++              spin_lock(&dentry->d_lock);
++              read_lock(&gr_inode_lock);
++              retval = lookup_acl_subj_label(__get_ino(dentry),
++                                        __get_dev(dentry), role);
++              read_unlock(&gr_inode_lock);
++              /* snapshot the parent while d_lock is held */
++              parent = dentry->d_parent;
++              spin_unlock(&dentry->d_lock);
++
++              if (retval != NULL)
++                      goto out;
++
++              dentry = parent;
++      }
++
++      spin_lock(&dentry->d_lock);
++      read_lock(&gr_inode_lock);
++      retval = lookup_acl_subj_label(__get_ino(dentry),
++                                __get_dev(dentry), role);
++      read_unlock(&gr_inode_lock);
++      spin_unlock(&dentry->d_lock);
++
++      if (unlikely(retval == NULL)) {
++              /* gr_real_root is pinned, we don't need to hold a reference */
++              read_lock(&gr_inode_lock);
++              retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
++                                        __get_dev(gr_real_root.dentry), role);
++              read_unlock(&gr_inode_lock);
++      }
++out:
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++
++      BUG_ON(retval == NULL);
++
++      return retval;
++}
++
++/* Assign the named special role (GR_ROLE_SPECIAL) to current's real
++ * parent task: bump the role id, pick the subject from the parent's
++ * exec_file, and recompute is_writable against both the default role's
++ * and the new role's root object label.  Silently does nothing if the
++ * role does not exist or the parent has no exec_file.
++ */
++void
++assign_special_role(const char *rolename)
++{
++      struct acl_object_label *obj;
++      struct acl_role_label *r;
++      struct acl_role_label *assigned = NULL;
++      struct task_struct *tsk;
++      struct file *filp;
++
++      FOR_EACH_ROLE_START(r)
++              if (!strcmp(rolename, r->rolename) &&
++                  (r->roletype & GR_ROLE_SPECIAL)) {
++                      assigned = r;
++                      break;
++              }
++      FOR_EACH_ROLE_END(r)
++
++      if (!assigned)
++              return;
++
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++
++      tsk = current->real_parent;
++      if (tsk == NULL)
++              goto out_unlock;
++
++      filp = tsk->exec_file;
++      if (filp == NULL)
++              goto out_unlock;
++
++      tsk->is_writable = 0;
++      tsk->inherited = 0;
++
++      tsk->acl_sp_role = 1;
++      tsk->acl_role_id = ++acl_sp_role_value;
++      tsk->role = assigned;
++      tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
++
++      /* ignore additional mmap checks for processes that are writable
++         by the default ACL */
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              tsk->is_writable = 1;
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
++                      tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
++#endif
++
++out_unlock:
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++      return;
++}
++
++
++/* Emit a learning-mode audit record for an access by current to
++ * (dentry, mnt) with the given mode bits.
++ */
++static void
++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
++{
++      struct task_struct *task = current;
++      const struct cred *cred = current_cred();
++
++      security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++                     GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++                     task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++                     1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
++
++      return;
++}
++
++/* Emit a learning-mode record for a uid change (real/effective/fs),
++ * tagged 'u' to distinguish it from gid changes.
++ */
++static void
++gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
++{
++      struct task_struct *task = current;
++      const struct cred *cred = current_cred();
++
++      security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++                     GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++                     task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++                     'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
++
++      return;
++}
++
++/* Emit a learning-mode record for a gid change (real/effective/fs),
++ * tagged 'g' to distinguish it from uid changes.
++ */
++static void
++gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
++{
++      struct task_struct *task = current;
++      const struct cred *cred = current_cred();
++
++      security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++                     GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++                     task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++                     'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
++
++      return;
++}
++
++/* Apply the resource limits defined by the task's subject label.  No-op
++ * for learning subjects.  RLIMIT_NOFILE is clamped to sysctl_nr_open
++ * (read once to guard against concurrent sysctl changes); RLIMIT_CPU
++ * changes are propagated to the CPU timer via update_rlimit_cpu().
++ */
++static void
++gr_set_proc_res(struct task_struct *task)
++{
++      struct acl_subject_label *proc;
++      unsigned short i;
++
++      proc = task->acl;
++
++      if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
++              return;
++
++      for (i = 0; i < RLIM_NLIMITS; i++) {
++              unsigned long rlim_cur, rlim_max;
++
++              /* only limits named in the subject's resmask are applied */
++              if (!(proc->resmask & (1U << i)))
++                      continue;
++
++              rlim_cur = proc->res[i].rlim_cur;
++              rlim_max = proc->res[i].rlim_max;
++
++              if (i == RLIMIT_NOFILE) {
++                      unsigned long saved_sysctl_nr_open = sysctl_nr_open;
++                      if (rlim_cur > saved_sysctl_nr_open)
++                              rlim_cur = saved_sysctl_nr_open;
++                      if (rlim_max > saved_sysctl_nr_open)
++                              rlim_max = saved_sysctl_nr_open;
++              }
++
++              task->signal->rlim[i].rlim_cur = rlim_cur;
++              task->signal->rlim[i].rlim_max = rlim_max;
++
++              if (i == RLIMIT_CPU)
++                      update_rlimit_cpu(task, rlim_cur);
++      }
++
++      return;
++}
++
++/* both of the below must be called with
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++   except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
++*/
++
++/* Find the subject label for 'task' in 'state'.  If 'filename' is NULL
++ * the task's exec_file path is resolved instead.  A name-set hit routes
++ * to the deleted or live subject lookup depending on the entry's state;
++ * when 'fallback' is set, a miss falls back to chk_subj_label() on the
++ * exec_file.  Returns NULL if the task has no exec_file, or on a miss
++ * with fallback disabled.
++ */
++struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
++{
++      char *tmpname;
++      struct acl_subject_label *tmpsubj;
++      struct file *filp;
++      struct name_entry *nmatch;
++
++      filp = task->exec_file;
++      if (filp == NULL)
++              return NULL;
++
++      /* the following is to apply the correct subject
++         on binaries running when the RBAC system
++         is enabled, when the binaries have been
++         replaced or deleted since their execution
++         -----
++         when the RBAC system starts, the inode/dev
++         from exec_file will be one that the RBAC system
++         is unaware of.  It only knows the inode/dev
++         of the present file on disk, or the absence
++         of it.
++      */
++
++      if (filename)
++              nmatch = __lookup_name_entry(state, filename);
++      else {
++              /* gr_to_filename_rbac() uses a per-cpu buffer; keep preemption off */
++              preempt_disable();
++              tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
++
++              nmatch = __lookup_name_entry(state, tmpname);
++              preempt_enable();
++      }
++      tmpsubj = NULL;
++      if (nmatch) {
++              if (nmatch->deleted)
++                      tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
++              else
++                      tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
++      }
++      /* this also works for the reload case -- if we don't match a potentially inherited subject
++         then we fall back to a normal lookup based on the binary's ino/dev
++      */
++      if (tmpsubj == NULL && fallback)
++              tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
++
++      return tmpsubj;
++}
++
++/* Subject lookup against the live (running) policy state. */
++static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
++{
++      return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
++}
++
++/* Install 'subj' as the task's ACL subject: recompute is_writable from
++ * the exec_file's object label under both the state's default role and
++ * the task's own role, then apply the subject's resource limits.
++ */
++void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
++{
++      struct acl_object_label *obj;
++      struct file *filp;
++
++      filp = task->exec_file;
++
++      task->acl = subj;
++      task->is_writable = 0;
++      /* ignore additional mmap checks for processes that are writable 
++         by the default ACL */
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++
++      gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++}
++
++/* Apply a subject against the live (running) policy state. */
++static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
++{
++      __gr_apply_subject_to_task(&running_polstate, task, subj);
++}
++
++/* Core file-access check: intersect the requested mode with the object
++ * label's mode for (dentry, mnt) under current's subject.  Returns the
++ * granted bits (all bits minus audits if RBAC isn't up).  Side effects:
++ * a GR_INIT_TRANSFER object opened for write by a persistent role hands
++ * the role over to init; learning subjects get the request granted
++ * (minus audit/suppress bits) and logged.
++ */
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++             const struct vfsmount * mnt)
++{
++      __u32 retval = mode;
++      struct acl_subject_label *curracl;
++      struct acl_object_label *currobj;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return (mode & ~GR_AUDITS);
++
++      curracl = current->acl;
++
++      currobj = chk_obj_label(dentry, mnt, curracl);
++      retval = currobj->mode & mode;
++
++      /* if we're opening a specified transfer file for writing
++         (e.g. /dev/initctl), then transfer our role to init
++      */
++      if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
++                   current->role->roletype & GR_ROLE_PERSIST)) {
++              struct task_struct *task = init_pid_ns.child_reaper;
++
++              if (task->role != current->role) {
++                      struct acl_subject_label *subj;
++
++                      task->acl_sp_role = 0;
++                      task->acl_role_id = current->acl_role_id;
++                      task->role = current->role;
++                      rcu_read_lock();
++                      read_lock(&grsec_exec_file_lock);
++                      subj = gr_get_subject_for_task(task, NULL, 1);
++                      gr_apply_subject_to_task(task, subj);
++                      read_unlock(&grsec_exec_file_lock);
++                      rcu_read_unlock();
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
++              }
++      }
++
++      /* learning mode: grant the request (minus audit/suppress) and log it */
++      if (unlikely
++          ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
++           && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++              __u32 new_mode = mode;
++
++              new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++              retval = new_mode;
++
++              if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
++                      new_mode |= GR_INHERIT;
++
++              if (!(mode & GR_NOLEARN))
++                      gr_log_learn(dentry, mnt, new_mode);
++      }
++
++      return retval;
++}
++
++/* Find the object label that will govern a to-be-created name: first try
++ * an exact name-set match (preferring deleted entries via
++ * lookup_name_entry_create/lookup_acl_obj_label_create), otherwise fall
++ * back to the parent directory's label with create-time glob checks.
++ * Returns NULL only when RBAC isn't up.  Runs with preemption disabled
++ * while the per-cpu path buffer is in use.
++ */
++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
++                                            const struct dentry *parent,
++                                            const struct vfsmount *mnt)
++{
++      struct name_entry *match;
++      struct acl_object_label *matchpo;
++      struct acl_subject_label *curracl;
++      char *path;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return NULL;
++
++      preempt_disable();
++      path = gr_to_filename_rbac(new_dentry, mnt);
++      match = lookup_name_entry_create(path);
++
++      curracl = current->acl;
++
++      if (match) {
++              read_lock(&gr_inode_lock);
++              matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++              read_unlock(&gr_inode_lock);
++
++              if (matchpo) {
++                      preempt_enable();
++                      return matchpo;
++              }
++      }
++
++      // lookup parent
++
++      matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++
++      preempt_enable();
++      return matchpo;
++}
++
++/* Access check for creating new_dentry under parent: intersect the
++ * requested mode with the create-path object label.  Learning subjects
++ * get the full request (minus audit/suppress bits) granted and logged.
++ */
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++              const struct vfsmount * mnt, const __u32 mode)
++{
++      struct acl_object_label *matchpo;
++      __u32 retval;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return (mode & ~GR_AUDITS);
++
++      matchpo = gr_get_create_object(new_dentry, parent, mnt);
++
++      retval = matchpo->mode & mode;
++
++      if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++          && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++              __u32 new_mode = mode;
++
++              new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++              gr_log_learn(new_dentry, mnt, new_mode);
++              return new_mode;
++      }
++
++      return retval;
++}
++
++/* Permission check for hardlinking old_dentry to new_dentry.  Denies the
++ * link when the old name lacks any permission the new name grants (a
++ * link must not widen access), when restriction/audit bits present on
++ * the old name are missing from the new one, or when linking a
++ * suid/sgid/fcapped binary without GR_SETID.  Returns the granted mode
++ * on success, GR_SUPPRESS to deny quietly, or 0 to deny with logging;
++ * learning subjects are granted and logged.
++ */
++__u32
++gr_check_link(const struct dentry * new_dentry,
++            const struct dentry * parent_dentry,
++            const struct vfsmount * parent_mnt,
++            const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++      struct acl_object_label *obj;
++      __u32 oldmode, newmode;
++      __u32 needmode;
++      __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
++                         GR_DELETE | GR_INHERIT;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return (GR_CREATE | GR_LINK);
++
++      obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++      oldmode = obj->mode;
++
++      obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
++      newmode = obj->mode;
++
++      needmode = newmode & checkmodes;
++
++      // old name for hardlink must have at least the permissions of the new name
++      if ((oldmode & needmode) != needmode)
++              goto bad;
++
++      // if old name had restrictions/auditing, make sure the new name does as well
++      needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
++
++      // don't allow hardlinking of suid/sgid/fcapped files without permission
++      if (is_privileged_binary(old_dentry))
++              needmode |= GR_SETID;
++
++      if ((newmode & needmode) != needmode)
++              goto bad;
++
++      // enforce minimum permissions
++      if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
++              return newmode;
++bad:
++      needmode = oldmode;
++      if (is_privileged_binary(old_dentry))
++              needmode |= GR_SETID;
++      
++      if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
++              return (GR_CREATE | GR_LINK);
++      } else if (newmode & GR_SUPPRESS)
++              return GR_SUPPRESS;
++      else
++              return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
++              return 1;
++
++      return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++      if (unlikely(!(gr_status & GR_READY) || !task))
++              return 0;
++
++      if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++          task->acl != current->acl)
++              return 1;
++
++      return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++      struct task_struct *p;
++      int ret = 0;
++
++      if (unlikely(!(gr_status & GR_READY) || !pid))
++              return ret;
++
++      read_lock(&tasklist_lock);
++      do_each_pid_task(pid, type, p) {
++              if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++                  p->acl != current->acl) {
++                      ret = 1;
++                      goto out;
++              }
++      } while_each_pid_task(pid, type, p);
++out:
++      read_unlock(&tasklist_lock);
++
++      return ret;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++      struct task_struct *p = current;
++
++      tsk->inherited = p->inherited;
++      tsk->acl_sp_role = 0;
++      tsk->acl_role_id = p->acl_role_id;
++      tsk->acl = p->acl;
++      tsk->role = p->role;
++      tsk->signal->used_accept = 0;
++      tsk->signal->curr_ip = p->signal->curr_ip;
++      tsk->signal->saved_ip = p->signal->saved_ip;
++      if (p->exec_file)
++              get_file(p->exec_file);
++      tsk->exec_file = p->exec_file;
++      tsk->is_writable = p->is_writable;
++      if (unlikely(p->signal->used_accept)) {
++              p->signal->curr_ip = 0;
++              p->signal->saved_ip = 0;
++      }
++
++      return;
++}
++
++extern int gr_process_kernel_setuid_ban(struct user_struct *user);
++
++int
++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
++{
++      unsigned int i;
++      __u16 num;
++      uid_t *uidlist;
++      uid_t curuid;
++      int realok = 0;
++      int effectiveok = 0;
++      int fsok = 0;
++      uid_t globalreal, globaleffective, globalfs;
++
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
++      struct user_struct *user;
++
++      if (!uid_valid(real))
++              goto skipit;
++
++      /* find user based on global namespace */
++
++      globalreal = GR_GLOBAL_UID(real);
++
++      user = find_user(make_kuid(&init_user_ns, globalreal));
++      if (user == NULL)
++              goto skipit;
++
++      if (gr_process_kernel_setuid_ban(user)) {
++              /* for find_user */
++              free_uid(user);
++              return 1;
++      }
++
++      /* for find_user */
++      free_uid(user);
++
++skipit:
++#endif
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++              gr_log_learn_uid_change(real, effective, fs);
++
++      num = current->acl->user_trans_num;
++      uidlist = current->acl->user_transitions;
++
++      if (uidlist == NULL)
++              return 0;
++
++      if (!uid_valid(real)) {
++              realok = 1;
++              globalreal = (uid_t)-1;         
++      } else {
++              globalreal = GR_GLOBAL_UID(real);               
++      }
++      if (!uid_valid(effective)) {
++              effectiveok = 1;
++              globaleffective = (uid_t)-1;
++      } else {
++              globaleffective = GR_GLOBAL_UID(effective);
++      }
++      if (!uid_valid(fs)) {
++              fsok = 1;
++              globalfs = (uid_t)-1;
++      } else {
++              globalfs = GR_GLOBAL_UID(fs);
++      }
++
++      if (current->acl->user_trans_type & GR_ID_ALLOW) {
++              for (i = 0; i < num; i++) {
++                      curuid = uidlist[i];
++                      if (globalreal == curuid)
++                              realok = 1;
++                      if (globaleffective == curuid)
++                              effectiveok = 1;
++                      if (globalfs == curuid)
++                              fsok = 1;
++              }
++      } else if (current->acl->user_trans_type & GR_ID_DENY) {
++              for (i = 0; i < num; i++) {
++                      curuid = uidlist[i];
++                      if (globalreal == curuid)
++                              break;
++                      if (globaleffective == curuid)
++                              break;
++                      if (globalfs == curuid)
++                              break;
++              }
++              /* not in deny list */
++              if (i == num) {
++                      realok = 1;
++                      effectiveok = 1;
++                      fsok = 1;
++              }
++      }
++
++      if (realok && effectiveok && fsok)
++              return 0;
++      else {
++              gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
++              return 1;
++      }
++}
++
++int
++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
++{
++      unsigned int i;
++      __u16 num;
++      gid_t *gidlist;
++      gid_t curgid;
++      int realok = 0;
++      int effectiveok = 0;
++      int fsok = 0;
++      gid_t globalreal, globaleffective, globalfs;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++              gr_log_learn_gid_change(real, effective, fs);
++
++      num = current->acl->group_trans_num;
++      gidlist = current->acl->group_transitions;
++
++      if (gidlist == NULL)
++              return 0;
++
++      if (!gid_valid(real)) {
++              realok = 1;
++              globalreal = (gid_t)-1;         
++      } else {
++              globalreal = GR_GLOBAL_GID(real);
++      }
++      if (!gid_valid(effective)) {
++              effectiveok = 1;
++              globaleffective = (gid_t)-1;            
++      } else {
++              globaleffective = GR_GLOBAL_GID(effective);
++      }
++      if (!gid_valid(fs)) {
++              fsok = 1;
++              globalfs = (gid_t)-1;           
++      } else {
++              globalfs = GR_GLOBAL_GID(fs);
++      }
++
++      if (current->acl->group_trans_type & GR_ID_ALLOW) {
++              for (i = 0; i < num; i++) {
++                      curgid = gidlist[i];
++                      if (globalreal == curgid)
++                              realok = 1;
++                      if (globaleffective == curgid)
++                              effectiveok = 1;
++                      if (globalfs == curgid)
++                              fsok = 1;
++              }
++      } else if (current->acl->group_trans_type & GR_ID_DENY) {
++              for (i = 0; i < num; i++) {
++                      curgid = gidlist[i];
++                      if (globalreal == curgid)
++                              break;
++                      if (globaleffective == curgid)
++                              break;
++                      if (globalfs == curgid)
++                              break;
++              }
++              /* not in deny list */
++              if (i == num) {
++                      realok = 1;
++                      effectiveok = 1;
++                      fsok = 1;
++              }
++      }
++
++      if (realok && effectiveok && fsok)
++              return 0;
++      else {
++              gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
++              return 1;
++      }
++}
++
++extern int gr_acl_is_capable(const int cap);
++
++void
++gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
++{
++      struct acl_role_label *role = task->role;
++      struct acl_role_label *origrole = role;
++      struct acl_subject_label *subj = NULL;
++      struct acl_object_label *obj;
++      struct file *filp;
++      uid_t uid;
++      gid_t gid;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      uid = GR_GLOBAL_UID(kuid);
++      gid = GR_GLOBAL_GID(kgid);
++
++      filp = task->exec_file;
++
++      /* kernel process, we'll give them the kernel role */
++      if (unlikely(!filp)) {
++              task->role = running_polstate.kernel_role;
++              task->acl = running_polstate.kernel_role->root_label;
++              return;
++      } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
++              /* save the current ip at time of role lookup so that the proper
++                 IP will be learned for role_allowed_ip */
++              task->signal->saved_ip = task->signal->curr_ip;
++              role = lookup_acl_role_label(task, uid, gid);
++      }
++
++      /* don't change the role if we're not a privileged process */
++      if (role && task->role != role &&
++          (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
++           ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
++              return;
++
++      task->role = role;
++
++      if (task->inherited) {
++              /* if we reached our subject through inheritance, then first see
++                 if there's a subject of the same name in the new role that has
++                 an object that would result in the same inherited subject
++              */
++              subj = gr_get_subject_for_task(task, task->acl->filename, 0);
++              if (subj) {
++                      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
++                      if (!(obj->mode & GR_INHERIT))
++                              subj = NULL;
++              }
++              
++      }
++      if (subj == NULL) {
++              /* otherwise:
++                 perform subject lookup in possibly new role
++                 we can use this result below in the case where role == task->role
++              */
++              subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
++      }
++
++      /* if we changed uid/gid, but result in the same role
++         and are using inheritance, don't lose the inherited subject
++         if current subject is other than what normal lookup
++         would result in, we arrived via inheritance, don't
++         lose subject
++      */
++      if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
++                                 (subj == task->acl)))
++              task->acl = subj;
++
++      /* leave task->inherited unaffected */
++
++      task->is_writable = 0;
++
++      /* ignore additional mmap checks for processes that are writable 
++         by the default ACL */
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++      obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++
++      gr_set_proc_res(task);
++
++      return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++                const int unsafe_flags)
++{
++      struct task_struct *task = current;
++      struct acl_subject_label *newacl;
++      struct acl_object_label *obj;
++      __u32 retmode;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      newacl = chk_subj_label(dentry, mnt, task->role);
++
++      /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
++         did an exec
++      */
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
++          (task->parent->acl->mode & GR_POVERRIDE))) {
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++              goto skip_check;
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
++           !(task->role->roletype & GR_ROLE_GOD) &&
++           !gr_search_file(dentry, GR_PTRACERD, mnt) &&
++           !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++              if (unsafe_flags & LSM_UNSAFE_SHARE)
++                      gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
++              else
++                      gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
++              return -EACCES;
++      }
++
++skip_check:
++
++      obj = chk_obj_label(dentry, mnt, task->acl);
++      retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++      if (!(task->acl->mode & GR_INHERITLEARN) &&
++          ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
++              if (obj->nested)
++                      task->acl = obj->nested;
++              else
++                      task->acl = newacl;
++              task->inherited = 0;
++      } else {
++              task->inherited = 1;
++              if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
++                      gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
++      }
++
++      task->is_writable = 0;
++
++      /* ignore additional mmap checks for processes that are writable 
++         by the default ACL */
++      obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++      obj = chk_obj_label(dentry, mnt, task->role->root_label);
++      if (unlikely(obj->mode & GR_WRITE))
++              task->is_writable = 1;
++
++      gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++      return 0;
++}
++
++/* always called with valid inodev ptr */
++static void
++do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
++{
++      struct acl_object_label *matchpo;
++      struct acl_subject_label *matchps;
++      struct acl_subject_label *subj;
++      struct acl_role_label *role;
++      unsigned int x;
++
++      FOR_EACH_ROLE_START(role)
++              FOR_EACH_SUBJECT_START(role, subj, x)
++                      if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++                              matchpo->mode |= GR_DELETED;
++              FOR_EACH_SUBJECT_END(subj,x)
++              FOR_EACH_NESTED_SUBJECT_START(role, subj)
++                      /* nested subjects aren't in the role's subj_hash table */
++                      if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++                              matchpo->mode |= GR_DELETED;
++              FOR_EACH_NESTED_SUBJECT_END(subj)
++              if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
++                      matchps->mode |= GR_DELETED;
++      FOR_EACH_ROLE_END(role)
++
++      inodev->nentry->deleted = 1;
++
++      return;
++}
++
++void
++gr_handle_delete(const u64 ino, const dev_t dev)
++{
++      struct inodev_entry *inodev;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      write_lock(&gr_inode_lock);
++      inodev = lookup_inodev_entry(ino, dev);
++      if (inodev != NULL)
++              do_handle_delete(inodev, ino, dev);
++      write_unlock(&gr_inode_lock);
++
++      return;
++}
++
++static void
++update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
++                   const u64 newinode, const dev_t newdevice,
++                   struct acl_subject_label *subj)
++{
++      unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
++      struct acl_object_label *match;
++
++      match = subj->obj_hash[index];
++
++      while (match && (match->inode != oldinode ||
++             match->device != olddevice ||
++             !(match->mode & GR_DELETED)))
++              match = match->next;
++
++      if (match && (match->inode == oldinode)
++          && (match->device == olddevice)
++          && (match->mode & GR_DELETED)) {
++              if (match->prev == NULL) {
++                      subj->obj_hash[index] = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = NULL;
++              } else {
++                      match->prev->next = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = match->prev;
++              }
++              match->prev = NULL;
++              match->next = NULL;
++              match->inode = newinode;
++              match->device = newdevice;
++              match->mode &= ~GR_DELETED;
++
++              insert_acl_obj_label(match, subj);
++      }
++
++      return;
++}
++
++static void
++update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
++                    const u64 newinode, const dev_t newdevice,
++                    struct acl_role_label *role)
++{
++      unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
++      struct acl_subject_label *match;
++
++      match = role->subj_hash[index];
++
++      while (match && (match->inode != oldinode ||
++             match->device != olddevice ||
++             !(match->mode & GR_DELETED)))
++              match = match->next;
++
++      if (match && (match->inode == oldinode)
++          && (match->device == olddevice)
++          && (match->mode & GR_DELETED)) {
++              if (match->prev == NULL) {
++                      role->subj_hash[index] = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = NULL;
++              } else {
++                      match->prev->next = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = match->prev;
++              }
++              match->prev = NULL;
++              match->next = NULL;
++              match->inode = newinode;
++              match->device = newdevice;
++              match->mode &= ~GR_DELETED;
++
++              insert_acl_subj_label(match, role);
++      }
++
++      return;
++}
++
++static void
++update_inodev_entry(const u64 oldinode, const dev_t olddevice,
++                  const u64 newinode, const dev_t newdevice)
++{
++      unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
++      struct inodev_entry *match;
++
++      match = running_polstate.inodev_set.i_hash[index];
++
++      while (match && (match->nentry->inode != oldinode ||
++             match->nentry->device != olddevice || !match->nentry->deleted))
++              match = match->next;
++
++      if (match && (match->nentry->inode == oldinode)
++          && (match->nentry->device == olddevice) &&
++          match->nentry->deleted) {
++              if (match->prev == NULL) {
++                      running_polstate.inodev_set.i_hash[index] = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = NULL;
++              } else {
++                      match->prev->next = match->next;
++                      if (match->next != NULL)
++                              match->next->prev = match->prev;
++              }
++              match->prev = NULL;
++              match->next = NULL;
++              match->nentry->inode = newinode;
++              match->nentry->device = newdevice;
++              match->nentry->deleted = 0;
++
++              insert_inodev_entry(match);
++      }
++
++      return;
++}
++
++static void
++__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
++{
++      struct acl_subject_label *subj;
++      struct acl_role_label *role;
++      unsigned int x;
++
++      FOR_EACH_ROLE_START(role)
++              update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
++
++              FOR_EACH_NESTED_SUBJECT_START(role, subj)
++                      if ((subj->inode == ino) && (subj->device == dev)) {
++                              subj->inode = ino;
++                              subj->device = dev;
++                      }
++                      /* nested subjects aren't in the role's subj_hash table */
++                      update_acl_obj_label(matchn->inode, matchn->device,
++                                           ino, dev, subj);
++              FOR_EACH_NESTED_SUBJECT_END(subj)
++              FOR_EACH_SUBJECT_START(role, subj, x)
++                      update_acl_obj_label(matchn->inode, matchn->device,
++                                           ino, dev, subj);
++              FOR_EACH_SUBJECT_END(subj,x)
++      FOR_EACH_ROLE_END(role)
++
++      update_inodev_entry(matchn->inode, matchn->device, ino, dev);
++
++      return;
++}
++
++static void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++               const struct vfsmount *mnt)
++{
++      u64 ino = __get_ino(dentry);
++      dev_t dev = __get_dev(dentry);
++
++      __do_handle_create(matchn, ino, dev);   
++
++      return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      struct name_entry *matchn;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      preempt_disable();
++      matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
++
++      if (unlikely((unsigned long)matchn)) {
++              write_lock(&gr_inode_lock);
++              do_handle_create(matchn, dentry, mnt);
++              write_unlock(&gr_inode_lock);
++      }
++      preempt_enable();
++
++      return;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
++{
++      struct name_entry *matchn;
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      preempt_disable();
++      matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
++
++      if (unlikely((unsigned long)matchn)) {
++              write_lock(&gr_inode_lock);
++              __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
++              write_unlock(&gr_inode_lock);
++      }
++      preempt_enable();
++
++      return;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++               struct dentry *old_dentry,
++               struct dentry *new_dentry,
++               struct vfsmount *mnt, const __u8 replace, unsigned int flags)
++{
++      struct name_entry *matchn;
++      struct name_entry *matchn2 = NULL;
++      struct inodev_entry *inodev;
++      struct inode *inode = d_backing_inode(new_dentry);
++      struct inode *old_inode = d_backing_inode(old_dentry);
++      u64 old_ino = __get_ino(old_dentry);
++      dev_t old_dev = __get_dev(old_dentry);
++      unsigned int exchange = flags & RENAME_EXCHANGE;
++
++      /* vfs_rename swaps the name and parent link for old_dentry and
++         new_dentry
++         at this point, old_dentry has the new name, parent link, and inode
++         for the renamed file
++         if a file is being replaced by a rename, new_dentry has the inode
++         and name for the replaced file
++      */
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      preempt_disable();
++      matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
++
++      /* exchange cases:
++         a filename exists for the source, but not dest
++              do a recreate on source
++         a filename exists for the dest, but not source
++              do a recreate on dest
++         a filename exists for both source and dest
++              delete source and dest, then create source and dest
++         a filename exists for neither source nor dest
++              no updates needed
++
++         the name entry lookups get us the old inode/dev associated with
++         each name, so do the deletes first (if possible) so that when
++         we do the create, we pick up on the right entries
++      */
++
++      if (exchange)
++              matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
++
++      /* we wouldn't have to check d_inode if it weren't for
++         NFS silly-renaming
++       */
++
++      write_lock(&gr_inode_lock);
++      if (unlikely((replace || exchange) && inode)) {
++              u64 new_ino = __get_ino(new_dentry);
++              dev_t new_dev = __get_dev(new_dentry);
++
++              inodev = lookup_inodev_entry(new_ino, new_dev);
++              if (inodev != NULL && ((inode->i_nlink <= 1) || d_is_dir(new_dentry)))
++                      do_handle_delete(inodev, new_ino, new_dev);
++      }
++
++      inodev = lookup_inodev_entry(old_ino, old_dev);
++      if (inodev != NULL && ((old_inode->i_nlink <= 1) || d_is_dir(old_dentry)))
++              do_handle_delete(inodev, old_ino, old_dev);
++
++      if (unlikely(matchn != NULL))
++              do_handle_create(matchn, old_dentry, mnt);
++
++      if (unlikely(matchn2 != NULL))
++              do_handle_create(matchn2, new_dentry, mnt);
++
++      write_unlock(&gr_inode_lock);
++      preempt_enable();
++
++      return;
++}
++
++#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
++static const unsigned long res_learn_bumps[GR_NLIMITS] = {
++      [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
++      [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
++      [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
++      [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
++      [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
++      [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
++      [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
++      [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
++      [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
++      [RLIMIT_AS] = GR_RLIM_AS_BUMP,
++      [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
++      [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
++      [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
++      [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
++      [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
++      [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
++};
++
++void
++gr_learn_resource(const struct task_struct *task,
++                const int res, const unsigned long wanted, const int gt)
++{
++      struct acl_subject_label *acl;
++      const struct cred *cred;
++
++      if (unlikely((gr_status & GR_READY) &&
++                   task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
++              goto skip_reslog;
++
++      gr_log_resource(task, res, wanted, gt);
++skip_reslog:
++
++      if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
++              return;
++
++      acl = task->acl;
++
++      if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
++                 !(acl->resmask & (1U << (unsigned short) res))))
++              return;
++
++      if (wanted >= acl->res[res].rlim_cur) {
++              unsigned long res_add;
++
++              res_add = wanted + res_learn_bumps[res];
++
++              acl->res[res].rlim_cur = res_add;
++
++              if (wanted > acl->res[res].rlim_max)
++                      acl->res[res].rlim_max = res_add;
++
++              /* only log the subject filename, since resource logging is supported for
++                 single-subject learning only */
++              rcu_read_lock();
++              cred = __task_cred(task);
++              security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++                             task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
++                             acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
++                             "", (unsigned long) res, &task->signal->saved_ip);
++              rcu_read_unlock();
++      }
++
++      return;
++}
++EXPORT_SYMBOL_GPL(gr_learn_resource);
++#endif
++
++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++      struct task_struct *task = current;
++        struct acl_subject_label *proc;
++      unsigned long flags;
++
++        if (unlikely(!(gr_status & GR_READY)))
++                return;
++
++      flags = pax_get_flags(task);
++
++        proc = task->acl;
++
++      if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
++              flags &= ~MF_PAX_PAGEEXEC;
++      if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
++              flags &= ~MF_PAX_SEGMEXEC;
++      if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
++              flags &= ~MF_PAX_RANDMMAP;
++      if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
++              flags &= ~MF_PAX_EMUTRAMP;
++      if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
++              flags &= ~MF_PAX_MPROTECT;
++
++      if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
++              flags |= MF_PAX_PAGEEXEC;
++      if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
++              flags |= MF_PAX_SEGMEXEC;
++      if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
++              flags |= MF_PAX_RANDMMAP;
++      if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
++              flags |= MF_PAX_EMUTRAMP;
++      if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
++              flags |= MF_PAX_MPROTECT;
++
++      pax_set_flags(task, flags);
++
++        return;
++}
++#endif
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++      struct file *filp;
++      struct task_struct *tmp = task;
++      struct task_struct *curtemp = current;
++      __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++#endif
++
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++      filp = task->exec_file;
++
++      while (task_pid_nr(tmp) > 0) {
++              if (tmp == curtemp)
++                      break;
++              tmp = tmp->real_parent;
++      }
++
++      if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
++                              ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
++              read_unlock(&grsec_exec_file_lock);
++              read_unlock(&tasklist_lock);
++              return 1;
++      }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (!(gr_status & GR_READY)) {
++              read_unlock(&grsec_exec_file_lock);
++              read_unlock(&tasklist_lock);
++              return 0;
++      }
++#endif
++
++      retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++
++      if (retmode & GR_NOPTRACE)
++              return 1;
++
++      if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++          && (current->acl != task->acl || (current->acl != current->role->root_label
++          && task_pid_nr(current) != task_pid_nr(task))))
++              return 1;
++
++      return 0;
++}
++
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return;
++
++      if (!(current->role->roletype & GR_ROLE_GOD))
++              return;
++
++      seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
++                      p->role->rolename, gr_task_roletype_to_char(p),
++                      p->acl->filename);
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++      struct task_struct *tmp = task;
++      struct task_struct *curtemp = current;
++      __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++#endif
++      if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
++              read_lock(&tasklist_lock);
++              while (task_pid_nr(tmp) > 0) {
++                      if (tmp == curtemp)
++                              break;
++                      tmp = tmp->real_parent;
++              }
++
++              if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
++                                      ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
++                      read_unlock(&tasklist_lock);
++                      gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++                      return 1;
++              }
++              read_unlock(&tasklist_lock);
++      }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      if (!(gr_status & GR_READY))
++              return 0;
++#endif
++
++      read_lock(&grsec_exec_file_lock);
++      if (unlikely(!task->exec_file)) {
++              read_unlock(&grsec_exec_file_lock);
++              return 0;
++      }
++
++      retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
++      read_unlock(&grsec_exec_file_lock);
++
++      if (retmode & GR_NOPTRACE) {
++              gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++              return 1;
++      }
++              
++      if (retmode & GR_PTRACERD) {
++              switch (request) {
++              case PTRACE_SEIZE:
++              case PTRACE_POKETEXT:
++              case PTRACE_POKEDATA:
++              case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) && !defined(CONFIG_ARM64)
++              case PTRACE_SETREGS:
++              case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_COMPAT
++#ifdef CONFIG_ARM64
++              case COMPAT_PTRACE_SETREGS:
++              case COMPAT_PTRACE_SETVFPREGS:
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++              case COMPAT_PTRACE_SETHBPREGS:
++#endif
++#endif
++#endif
++#ifdef CONFIG_X86
++              case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++              case PTRACE_SETVRREGS:
++#endif
++#ifdef CONFIG_ARM
++              case PTRACE_SET_SYSCALL:
++              case PTRACE_SETVFPREGS:
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++              case PTRACE_SETHBPREGS:
++#endif
++#endif
++                      return 1;
++              default:
++                      return 0;
++              }
++      } else if (!(current->acl->mode & GR_POVERRIDE) &&
++                 !(current->role->roletype & GR_ROLE_GOD) &&
++                 (current->acl != task->acl)) {
++              gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++              return 1;
++      }
++
++      return 0;
++}
++
++static int is_writable_mmap(const struct file *filp)
++{
++      struct task_struct *task = current;
++      struct acl_object_label *obj, *obj2;
++      struct dentry *dentry = filp->f_path.dentry;
++      struct vfsmount *mnt = filp->f_path.mnt;
++      struct inode *inode = d_backing_inode(dentry);
++
++      if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
++          !task->is_writable && d_is_reg(dentry) && (mnt != shm_mnt || (inode->i_nlink > 0))) {
++              obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
++              obj2 = chk_obj_label(dentry, mnt, task->role->root_label);
++              if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++                      gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, dentry, mnt);
++                      return 1;
++              }
++      }
++      return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++      __u32 mode;
++
++      if (unlikely(!file || !(prot & PROT_EXEC)))
++              return 1;
++
++      if (is_writable_mmap(file))
++              return 0;
++
++      mode =
++          gr_search_file(file->f_path.dentry,
++                         GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++                         file->f_path.mnt);
++
++      if (!gr_tpe_allow(file))
++              return 0;
++
++      if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      } else if (unlikely(!(mode & GR_EXEC))) {
++              return 0;
++      } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 1;
++      }
++
++      return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++      __u32 mode;
++
++      if (unlikely(!file || !(prot & PROT_EXEC)))
++              return 1;
++
++      if (is_writable_mmap(file))
++              return 0;
++
++      mode =
++          gr_search_file(file->f_path.dentry,
++                         GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++                         file->f_path.mnt);
++
++      if (!gr_tpe_allow(file))
++              return 0;
++
++      if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      } else if (unlikely(!(mode & GR_EXEC))) {
++              return 0;
++      } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++              return 1;
++      }
++
++      return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++      unsigned long runtime, cputime;
++      cputime_t utime, stime;
++      unsigned int wday, cday;
++      __u8 whr, chr;
++      __u8 wmin, cmin;
++      __u8 wsec, csec;
++      struct timespec curtime, starttime;
++
++      if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++                   !(task->acl->mode & GR_PROCACCT)))
++              return;
++      
++      curtime = ns_to_timespec(ktime_get_ns());
++      starttime = ns_to_timespec(task->start_time);
++      runtime = curtime.tv_sec - starttime.tv_sec;
++      wday = runtime / (60 * 60 * 24);
++      runtime -= wday * (60 * 60 * 24);
++      whr = runtime / (60 * 60);
++      runtime -= whr * (60 * 60);
++      wmin = runtime / 60;
++      runtime -= wmin * 60;
++      wsec = runtime;
++
++      task_cputime(task, &utime, &stime);
++      cputime = cputime_to_secs(utime + stime);
++      cday = cputime / (60 * 60 * 24);
++      cputime -= cday * (60 * 60 * 24);
++      chr = cputime / (60 * 60);
++      cputime -= chr * (60 * 60);
++      cmin = cputime / 60;
++      cputime -= cmin * 60;
++      csec = cputime;
++
++      gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
++
++      return;
++}
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++      struct task_struct *task;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      const struct cred *cred;
++#endif
++      int ret = 0;
++
++      /* restrict taskstats viewing to un-chrooted root users
++         who have the 'view' subject flag if the RBAC system is enabled
++      */
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      task = find_task_by_vpid(pid);
++      if (task) {
++#ifdef CONFIG_GRKERNSEC_CHROOT
++              if (proc_is_chrooted(task))
++                      ret = -EACCES;
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++              cred = __task_cred(task);
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++              if (gr_is_global_nonroot(cred->uid))
++                      ret = -EACCES;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++              if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
++                      ret = -EACCES;
++#endif
++#endif
++              if (gr_status & GR_READY) {
++                      if (!(task->acl->mode & GR_VIEW))
++                              ret = -EACCES;
++              }
++      } else
++              ret = -ENOENT;
++
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return ret;
++}
++#endif
++
++/* AUXV entries are filled via a descendant of search_binary_handler
++   after we've already applied the subject for the target
++*/
++int gr_acl_enable_at_secure(void)
++{
++      if (unlikely(!(gr_status & GR_READY)))
++              return 0;
++
++      if (current->acl->mode & GR_ATSECURE)
++              return 1;
++
++      return 0;
++}
++      
++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
++{
++      struct task_struct *task = current;
++      struct dentry *dentry = file->f_path.dentry;
++      struct vfsmount *mnt = file->f_path.mnt;
++      struct acl_object_label *obj, *tmp;
++      struct acl_subject_label *subj;
++      unsigned int bufsize;
++      int is_not_root;
++      char *path;
++      dev_t dev = __get_dev(dentry);
++
++      if (unlikely(!(gr_status & GR_READY)))
++              return 1;
++
++      if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++              return 1;
++
++      /* ignore Eric Biederman */
++      if (IS_PRIVATE(d_backing_inode(dentry)))
++              return 1;
++
++      subj = task->acl;
++      read_lock(&gr_inode_lock);
++      do {
++              obj = lookup_acl_obj_label(ino, dev, subj);
++              if (obj != NULL) {
++                      read_unlock(&gr_inode_lock);
++                      return (obj->mode & GR_FIND) ? 1 : 0;
++              }
++      } while ((subj = subj->parent_subject));
++      read_unlock(&gr_inode_lock);
++      
++      /* this is purely an optimization since we're looking for an object
++         for the directory we're doing a readdir on
++         if it's possible for any globbed object to match the entry we're
++         filling into the directory, then the object we find here will be
++         an anchor point with attached globbed objects
++      */
++      obj = chk_obj_label_noglob(dentry, mnt, task->acl);
++      if (obj->globbed == NULL)
++              return (obj->mode & GR_FIND) ? 1 : 0;
++
++      is_not_root = ((obj->filename[0] == '/') &&
++                 (obj->filename[1] == '\0')) ? 0 : 1;
++      bufsize = PAGE_SIZE - namelen - is_not_root;
++
++      /* check bufsize > PAGE_SIZE || bufsize == 0 */
++      if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
++              return 1;
++
++      preempt_disable();
++      path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++                         bufsize);
++
++      bufsize = strlen(path);
++
++      /* if base is "/", don't append an additional slash */
++      if (is_not_root)
++              *(path + bufsize) = '/';
++      memcpy(path + bufsize + is_not_root, name, namelen);
++      *(path + bufsize + namelen + is_not_root) = '\0';
++
++      tmp = obj->globbed;
++      while (tmp) {
++              if (!glob_match(tmp->filename, path)) {
++                      preempt_enable();
++                      return (tmp->mode & GR_FIND) ? 1 : 0;
++              }
++              tmp = tmp->next;
++      }
++      preempt_enable();
++      return (obj->mode & GR_FIND) ? 1 : 0;
++}
++
++void gr_put_exec_file(struct task_struct *task)
++{
++      struct file *filp;  
++
++      write_lock(&grsec_exec_file_lock);
++      filp = task->exec_file;   
++      task->exec_file = NULL;
++      write_unlock(&grsec_exec_file_lock);
++
++      if (filp)
++              fput(filp);
++
++      return;
++}
++
++
++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
++EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
++#endif
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL_GPL(gr_check_user_change);
++EXPORT_SYMBOL_GPL(gr_check_group_change);
++#endif
++
+diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
+new file mode 100644
+index 0000000..9adc75c
+--- /dev/null
++++ b/grsecurity/gracl_alloc.c
+@@ -0,0 +1,105 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
++struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
++
++static int
++alloc_pop(void)
++{
++      if (current_alloc_state->alloc_stack_next == 1)
++              return 0;
++
++      kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
++
++      current_alloc_state->alloc_stack_next--;
++
++      return 1;
++}
++
++static int
++alloc_push(void *buf)
++{
++      if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
++              return 1;
++
++      current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
++
++      current_alloc_state->alloc_stack_next++;
++
++      return 0;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++      void *ret = NULL;
++
++      if (!len || len > PAGE_SIZE)
++              goto out;
++
++      ret = kmalloc(len, GFP_KERNEL);
++
++      if (ret) {
++              if (alloc_push(ret)) {
++                      kfree(ret);
++                      ret = NULL;
++              }
++      }
++
++out:
++      return ret;
++}
++
++void *
++acl_alloc_num(unsigned long num, unsigned long len)
++{
++      if (!len || (num > (PAGE_SIZE / len)))
++              return NULL;
++
++      return acl_alloc(num * len);
++}
++
++void
++acl_free_all(void)
++{
++      if (!current_alloc_state->alloc_stack)
++              return;
++
++      while (alloc_pop()) ;
++
++      if (current_alloc_state->alloc_stack) {
++              if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++                      kfree(current_alloc_state->alloc_stack);
++              else
++                      vfree(current_alloc_state->alloc_stack);
++      }
++
++      current_alloc_state->alloc_stack = NULL;
++      current_alloc_state->alloc_stack_size = 1;
++      current_alloc_state->alloc_stack_next = 1;
++
++      return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++      if ((size * sizeof (void *)) <= PAGE_SIZE)
++              current_alloc_state->alloc_stack =
++                  (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++      else
++              current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++      current_alloc_state->alloc_stack_size = size;
++      current_alloc_state->alloc_stack_next = 1;
++
++      if (!current_alloc_state->alloc_stack)
++              return 0;
++      else
++              return 1;
++}
+diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
+new file mode 100644
+index 0000000..8747091
+--- /dev/null
++++ b/grsecurity/gracl_cap.c
+@@ -0,0 +1,96 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap, bool log)
++{
++      struct acl_subject_label *curracl;
++
++      if (!gr_acl_is_enabled())
++              return 1;
++
++      curracl = task->acl;
++
++      if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              if (log)
++                      security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++                             task->role->roletype, GR_GLOBAL_UID(cred->uid),
++                             GR_GLOBAL_GID(cred->gid), task->exec_file ?
++                             gr_to_filename(task->exec_file->f_path.dentry,
++                             task->exec_file->f_path.mnt) : curracl->filename,
++                             curracl->filename, 0UL,
++                             0UL, "", (unsigned long) cap, &task->signal->saved_ip);
++              return 1;
++      }
++
++      return 0;
++}
++
++int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap, bool log)
++{
++      struct acl_subject_label *curracl;
++      kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++      kernel_cap_t cap_audit = __cap_empty_set;
++
++      if (!gr_acl_is_enabled())
++              return 1;
++
++      curracl = task->acl;
++
++      cap_drop = curracl->cap_lower;
++      cap_mask = curracl->cap_mask;
++      cap_audit = curracl->cap_invert_audit;
++
++      while ((curracl = curracl->parent_subject)) {
++              /* if the cap isn't specified in the current computed mask but is specified in the
++                 current level subject, and is lowered in the current level subject, then add
++                 it to the set of dropped capabilities
++                 otherwise, add the current level subject's mask to the current computed mask
++               */
++              if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++                      cap_raise(cap_mask, cap);
++                      if (cap_raised(curracl->cap_lower, cap))
++                              cap_raise(cap_drop, cap);
++                      if (cap_raised(curracl->cap_invert_audit, cap))
++                              cap_raise(cap_audit, cap);
++              }
++      }
++
++      if (!cap_raised(cap_drop, cap)) {
++              if (log && cap_raised(cap_audit, cap))
++                      gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
++              return 1;
++      }
++
++      /* only learn the capability use if the process has the capability in the
++         general case, the two uses in sys.c of gr_learn_cap are an exception
++         to this rule to ensure any role transition involves what the full-learned
++         policy believes in a privileged process
++      */
++      if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap, log))
++              return 1;
++
++      if (log && (cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
++              gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
++
++      return 0;
++}
++
++int
++gr_acl_is_capable(const int cap)
++{
++      return gr_task_acl_is_capable(current, current_cred(), cap, true);
++}
++
++int
++gr_acl_is_capable_nolog(const int cap)
++{
++      return gr_task_acl_is_capable(current, current_cred(), cap, false);
++}
++
+diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
+new file mode 100644
+index 0000000..a43dd06
+--- /dev/null
++++ b/grsecurity/gracl_compat.c
+@@ -0,0 +1,269 @@
++#include <linux/kernel.h>
++#include <linux/gracl.h>
++#include <linux/compat.h>
++#include <linux/gracl_compat.h>
++
++#include <asm/uaccess.h>
++
++int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
++{
++      struct gr_arg_wrapper_compat uwrapcompat;
++
++        if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
++                return -EFAULT;
++
++        if ((uwrapcompat.version != GRSECURITY_VERSION) ||
++          (uwrapcompat.size != sizeof(struct gr_arg_compat)))  
++                return -EINVAL;
++
++      uwrap->arg = compat_ptr(uwrapcompat.arg);
++      uwrap->version = uwrapcompat.version;
++      uwrap->size = sizeof(struct gr_arg);
++
++        return 0;
++}
++
++int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++      struct gr_arg_compat argcompat;
++
++        if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
++                return -EFAULT;
++
++      arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
++      arg->role_db.num_pointers = argcompat.role_db.num_pointers;
++      arg->role_db.num_roles = argcompat.role_db.num_roles;
++      arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
++      arg->role_db.num_subjects = argcompat.role_db.num_subjects;
++      arg->role_db.num_objects = argcompat.role_db.num_objects;
++
++      memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
++      memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
++      memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
++      memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
++      arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
++      arg->segv_device = argcompat.segv_device;
++      arg->segv_inode = argcompat.segv_inode;
++      arg->segv_uid = argcompat.segv_uid;
++      arg->num_sprole_pws = argcompat.num_sprole_pws;
++      arg->mode = argcompat.mode;
++
++      return 0;
++}
++
++int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++      struct acl_object_label_compat objcompat;
++
++      if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
++                return -EFAULT;
++
++      obj->filename = compat_ptr(objcompat.filename);
++      obj->inode = objcompat.inode;
++      obj->device = objcompat.device;
++      obj->mode = objcompat.mode;
++
++      obj->nested = compat_ptr(objcompat.nested);
++      obj->globbed = compat_ptr(objcompat.globbed);
++
++      obj->prev = compat_ptr(objcompat.prev);
++      obj->next = compat_ptr(objcompat.next);
++
++      return 0;
++}
++
++int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++      unsigned int i;
++      struct acl_subject_label_compat subjcompat;
++
++      if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
++                return -EFAULT;
++
++      subj->filename = compat_ptr(subjcompat.filename);
++      subj->inode = subjcompat.inode;
++      subj->device = subjcompat.device;
++      subj->mode = subjcompat.mode;
++      subj->cap_mask = subjcompat.cap_mask;
++      subj->cap_lower = subjcompat.cap_lower;
++      subj->cap_invert_audit = subjcompat.cap_invert_audit;
++
++      for (i = 0; i < GR_NLIMITS; i++) {
++              if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
++                      subj->res[i].rlim_cur = RLIM_INFINITY;
++              else
++                      subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
++              if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
++                      subj->res[i].rlim_max = RLIM_INFINITY;
++              else
++                      subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
++      }
++      subj->resmask = subjcompat.resmask;
++
++      subj->user_trans_type = subjcompat.user_trans_type;
++      subj->group_trans_type = subjcompat.group_trans_type;
++      subj->user_transitions = compat_ptr(subjcompat.user_transitions);
++      subj->group_transitions = compat_ptr(subjcompat.group_transitions);
++      subj->user_trans_num = subjcompat.user_trans_num;
++      subj->group_trans_num = subjcompat.group_trans_num;
++
++      memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
++      memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
++      subj->ip_type = subjcompat.ip_type;
++      subj->ips = compat_ptr(subjcompat.ips);
++      subj->ip_num = subjcompat.ip_num;
++      subj->inaddr_any_override = subjcompat.inaddr_any_override;
++
++      subj->crashes = subjcompat.crashes;
++      subj->expires = subjcompat.expires;
++
++      subj->parent_subject = compat_ptr(subjcompat.parent_subject);
++      subj->hash = compat_ptr(subjcompat.hash);
++      subj->prev = compat_ptr(subjcompat.prev);
++      subj->next = compat_ptr(subjcompat.next);
++
++      subj->obj_hash = compat_ptr(subjcompat.obj_hash);
++      subj->obj_hash_size = subjcompat.obj_hash_size;
++      subj->pax_flags = subjcompat.pax_flags;
++
++      return 0;
++}
++
++int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++      struct acl_role_label_compat rolecompat;
++
++      if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
++                return -EFAULT;
++
++      role->rolename = compat_ptr(rolecompat.rolename);
++      role->uidgid = rolecompat.uidgid;
++      role->roletype = rolecompat.roletype;
++
++      role->auth_attempts = rolecompat.auth_attempts;
++      role->expires = rolecompat.expires;
++
++      role->root_label = compat_ptr(rolecompat.root_label);
++      role->hash = compat_ptr(rolecompat.hash);
++
++      role->prev = compat_ptr(rolecompat.prev);
++      role->next = compat_ptr(rolecompat.next);
++
++      role->transitions = compat_ptr(rolecompat.transitions);
++      role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
++      role->domain_children = compat_ptr(rolecompat.domain_children);
++      role->domain_child_num = rolecompat.domain_child_num;
++
++      role->umask = rolecompat.umask;
++
++      role->subj_hash = compat_ptr(rolecompat.subj_hash);
++      role->subj_hash_size = rolecompat.subj_hash_size;
++
++      return 0;
++}
++
++int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++      struct role_allowed_ip_compat roleip_compat;
++
++      if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
++                return -EFAULT;
++
++      roleip->addr = roleip_compat.addr;
++      roleip->netmask = roleip_compat.netmask;
++
++      roleip->prev = compat_ptr(roleip_compat.prev);
++      roleip->next = compat_ptr(roleip_compat.next);
++
++      return 0;
++}
++
++int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
++{
++      struct role_transition_compat trans_compat;
++
++      if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
++                return -EFAULT;
++
++      trans->rolename = compat_ptr(trans_compat.rolename);
++
++      trans->prev = compat_ptr(trans_compat.prev);
++      trans->next = compat_ptr(trans_compat.next);
++
++      return 0;
++
++}
++
++int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++      struct gr_hash_struct_compat hash_compat;
++
++      if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
++                return -EFAULT;
++
++      hash->table = compat_ptr(hash_compat.table);
++      hash->nametable = compat_ptr(hash_compat.nametable);
++      hash->first = compat_ptr(hash_compat.first);
++
++      hash->table_size = hash_compat.table_size;
++      hash->used_size = hash_compat.used_size;
++
++      hash->type = hash_compat.type;
++
++      return 0;
++}
++
++int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
++{
++      compat_uptr_t ptrcompat;
++
++      if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
++                return -EFAULT;
++
++      *(void **)ptr = compat_ptr(ptrcompat);
++
++      return 0;
++}
++
++int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++      struct acl_ip_label_compat ip_compat;
++
++      if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
++                return -EFAULT;
++
++      ip->iface = compat_ptr(ip_compat.iface);
++      ip->addr = ip_compat.addr;
++      ip->netmask = ip_compat.netmask;
++      ip->low = ip_compat.low;
++      ip->high = ip_compat.high;
++      ip->mode = ip_compat.mode;
++      ip->type = ip_compat.type;
++
++      memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
++
++      ip->prev = compat_ptr(ip_compat.prev);
++      ip->next = compat_ptr(ip_compat.next);
++
++      return 0;
++}
++
++int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++      struct sprole_pw_compat pw_compat;
++
++      if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
++                return -EFAULT;
++
++      pw->rolename = compat_ptr(pw_compat.rolename);
++      memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
++      memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
++
++      return 0;
++}
++
++size_t get_gr_arg_wrapper_size_compat(void)
++{
++      return sizeof(struct gr_arg_wrapper_compat);
++}
++
+diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
+new file mode 100644
+index 0000000..fce7f71
+--- /dev/null
++++ b/grsecurity/gracl_fs.c
+@@ -0,0 +1,448 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/stat.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++umode_t
++gr_acl_umask(void)
++{
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      return current->role->umask;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++                        const struct vfsmount * mnt)
++{
++      __u32 mode;
++
++      if (unlikely(d_is_negative(dentry)))
++              return GR_FIND;
++
++      mode =
++          gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++      if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++              return mode;
++      } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++              return 0;
++      } else if (unlikely(!(mode & GR_FIND)))
++              return 0;
++
++      return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++                 int acc_mode)
++{
++      __u32 reqmode = GR_FIND;
++      __u32 mode;
++
++      if (unlikely(d_is_negative(dentry)))
++              return reqmode;
++
++      if (acc_mode & MAY_APPEND)
++              reqmode |= GR_APPEND;
++      else if (acc_mode & MAY_WRITE)
++              reqmode |= GR_WRITE;
++      if ((acc_mode & MAY_READ) && !d_is_dir(dentry))
++              reqmode |= GR_READ;
++
++      mode =
++          gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++                         mnt);
++
++      if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return reqmode;
++      } else
++          if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++      {
++              gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return 0;
++      } else if (unlikely((mode & reqmode) != reqmode))
++              return 0;
++
++      return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++                  const struct dentry * p_dentry,
++                  const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++                  const int imode)
++{
++      __u32 reqmode = GR_WRITE | GR_CREATE;
++      __u32 mode;
++
++      if (acc_mode & MAY_APPEND)
++              reqmode |= GR_APPEND;
++      // if a directory was required or the directory already exists, then
++      // don't count this open as a read
++      if ((acc_mode & MAY_READ) &&
++          !((open_flags & O_DIRECTORY) || d_is_dir(dentry)))
++              reqmode |= GR_READ;
++      if ((open_flags & O_CREAT) &&
++          ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
++              reqmode |= GR_SETID;
++
++      mode =
++          gr_check_create(dentry, p_dentry, p_mnt,
++                          reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++      if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return reqmode;
++      } else
++          if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++      {
++              gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : reqmode &
++                             GR_APPEND ? " appending" : "");
++              return 0;
++      } else if (unlikely((mode & reqmode) != reqmode))
++              return 0;
++
++      return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++                   const int fmode)
++{
++      __u32 mode, reqmode = GR_FIND;
++
++      if ((fmode & S_IXOTH) && !d_is_dir(dentry))
++              reqmode |= GR_EXEC;
++      if (fmode & S_IWOTH)
++              reqmode |= GR_WRITE;
++      if (fmode & S_IROTH)
++              reqmode |= GR_READ;
++
++      mode =
++          gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++                         mnt);
++
++      if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : "",
++                             reqmode & GR_EXEC ? " executing" : "");
++              return reqmode;
++      } else
++          if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++      {
++              gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++                             reqmode & GR_READ ? " reading" : "",
++                             reqmode & GR_WRITE ? " writing" : "",
++                             reqmode & GR_EXEC ? " executing" : "");
++              return 0;
++      } else if (unlikely((mode & reqmode) != reqmode))
++              return 0;
++
++      return reqmode;
++}
++
++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
++{
++      __u32 mode;
++
++      mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
++
++      if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
++              return mode;
++      } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
++              return 0;
++      } else if (unlikely((mode & (reqmode)) != (reqmode)))
++              return 0;
++
++      return (reqmode);
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++                   umode_t *modeptr)
++{
++      umode_t mode;
++      struct inode *inode = d_backing_inode(dentry);
++
++      *modeptr &= ~gr_acl_umask();
++      mode = *modeptr;
++
++      if (unlikely(inode && S_ISSOCK(inode->i_mode)))
++              return 1;
++
++      if (unlikely(!d_is_dir(dentry) &&
++                   ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
++              return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++                                 GR_CHMOD_ACL_MSG);
++      } else {
++              return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++      }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++                         GR_UNIXCONNECT_ACL_MSG);
++}
++
++/* hardlinks require at minimum create and link permission,
++   any additional privilege required is based on the
++   privilege of the file being linked to
++*/
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++                 const struct dentry * parent_dentry,
++                 const struct vfsmount * parent_mnt,
++                 const struct dentry * old_dentry,
++                 const struct vfsmount * old_mnt, const struct filename *to)
++{
++      __u32 mode;
++      __u32 needmode = GR_CREATE | GR_LINK;
++      __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
++
++      mode =
++          gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++                        old_mnt);
++
++      if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
++              gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
++              return mode;
++      } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
++              return 0;
++      } else if (unlikely((mode & needmode) != needmode))
++              return 0;
++
++      return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++                    const struct dentry * parent_dentry,
++                    const struct vfsmount * parent_mnt, const struct filename *from)
++{
++      __u32 needmode = GR_WRITE | GR_CREATE;
++      __u32 mode;
++
++      mode =
++          gr_check_create(new_dentry, parent_dentry, parent_mnt,
++                          GR_CREATE | GR_AUDIT_CREATE |
++                          GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++      if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++              gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
++              return mode;
++      } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
++              return 0;
++      } else if (unlikely((mode & needmode) != needmode))
++              return 0;
++
++      return (GR_WRITE | GR_CREATE);
++}
++
++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
++{
++      __u32 mode;
++
++      mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++      if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++              gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
++              return mode;
++      } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++              gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
++              return 0;
++      } else if (unlikely((mode & (reqmode)) != (reqmode)))
++              return 0;
++
++      return (reqmode);
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++                  const struct dentry * parent_dentry,
++                  const struct vfsmount * parent_mnt,
++                  const int mode)
++{
++      __u32 reqmode = GR_WRITE | GR_CREATE;
++      if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
++              reqmode |= GR_SETID;
++
++      return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++                                reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++                  const struct dentry *parent_dentry,
++                  const struct vfsmount *parent_mnt)
++{
++      return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++                                GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++      (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++       ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++                   struct dentry *parent_dentry,
++                   const struct vfsmount *parent_mnt,
++                   struct dentry *old_dentry,
++                   struct inode *old_parent_inode,
++                   struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
++{
++      __u32 comp1, comp2;
++      int error = 0;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      if (flags & RENAME_EXCHANGE) {
++              comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++                                     GR_AUDIT_READ | GR_AUDIT_WRITE |
++                                     GR_SUPPRESS, parent_mnt);
++              comp2 =
++                  gr_search_file(old_dentry,
++                                 GR_READ | GR_WRITE | GR_AUDIT_READ |
++                                 GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++      } else if (d_is_negative(new_dentry)) {
++              comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++                                      GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++                                      GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++              comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++                                     GR_DELETE | GR_AUDIT_DELETE |
++                                     GR_AUDIT_READ | GR_AUDIT_WRITE |
++                                     GR_SUPPRESS, old_mnt);
++      } else {
++              comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++                                     GR_CREATE | GR_DELETE |
++                                     GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++                                     GR_AUDIT_READ | GR_AUDIT_WRITE |
++                                     GR_SUPPRESS, parent_mnt);
++              comp2 =
++                  gr_search_file(old_dentry,
++                                 GR_READ | GR_WRITE | GR_AUDIT_READ |
++                                 GR_DELETE | GR_AUDIT_DELETE |
++                                 GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++      }
++
++      if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++          ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++              gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
++      else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++               && !(comp2 & GR_SUPPRESS)) {
++              gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
++              error = -EACCES;
++      } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++              error = -EACCES;
++
++      return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++      u16 id;
++      char *rolename;
++
++      if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
++          !(current->role->roletype & GR_ROLE_PERSIST))) {
++              id = current->acl_role_id;
++              rolename = current->role->rolename;
++              gr_set_acls(1);
++              gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
++      }
++
++      gr_put_exec_file(current);
++      return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
++          !(current->acl->mode & GR_POVERRIDE) &&
++          !(current->role->roletype & GR_ROLE_GOD))
++              return -EACCES;
++
++      return 0;
++}
+diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
+new file mode 100644
+index 0000000..5da5304
+--- /dev/null
++++ b/grsecurity/gracl_ip.c
+@@ -0,0 +1,387 @@
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#define GR_BIND                       0x01
++#define GR_CONNECT            0x02
++#define GR_INVERT             0x04
++#define GR_BINDOVERRIDE               0x08
++#define GR_CONNECTOVERRIDE    0x10
++#define GR_SOCK_FAMILY                0x20
++
++static const char * gr_protocols[IPPROTO_MAX] = {
++      "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++      "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++      "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++      "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++      "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++      "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++      "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++      "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++      "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++      "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", 
++      "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", 
++      "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++      "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++      "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++      "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++      "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++      "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
++      "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++      "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++      "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++      "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++      "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++      "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++      "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++      "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++      "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++      "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++      "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++      "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++      "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++      "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++      "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++      };
++
++static const char * gr_socktypes[SOCK_MAX] = {
++      "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", 
++      "unknown:7", "unknown:8", "unknown:9", "packet"
++      };
++
++static const char * gr_sockfamilies[AF_MAX] = {
++      "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
++      "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
++      "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
++      "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg",
++      "nfc", "vsock", "kcm", "qipcrtr"
++      };
++
++const char *
++gr_proto_to_name(unsigned char proto)
++{
++      return gr_protocols[proto];
++}
++
++const char *
++gr_socktype_to_name(unsigned char type)
++{
++      return gr_socktypes[type];
++}
++
++const char *
++gr_sockfamily_to_name(unsigned char family)
++{
++      return gr_sockfamilies[family];
++}
++
++extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++      struct acl_subject_label *curr;
++      const struct cred *cred = current_cred();
++
++      if (unlikely(!gr_acl_is_enabled()))
++              goto exit;
++
++      if ((domain < 0) || (type < 0) || (protocol < 0) ||
++          (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
++              goto exit;      // let the kernel handle it
++
++      curr = current->acl;
++
++      if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
++              /* the family is allowed, if this is PF_INET allow it only if
++                 the extra sock type/protocol checks pass */
++              if (domain == PF_INET)
++                      goto inet_check;
++              goto exit;
++      } else {
++              if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++                      __u32 fakeip = 0;
++                      security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                                     current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                                     GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                                     gr_to_filename(current->exec_file->f_path.dentry,
++                                     current->exec_file->f_path.mnt) :
++                                     curr->filename, curr->filename,
++                                     &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
++                                     &current->signal->saved_ip);
++                      goto exit;
++              }
++              goto exit_fail;
++      }
++
++inet_check:
++      /* the rest of this checking is for IPv4 only */
++      if (!curr->ips)
++              goto exit;
++
++      if ((curr->ip_type & (1U << type)) &&
++          (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
++              goto exit;
++
++      if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              /* we don't place acls on raw sockets , and sometimes
++                 dgram/ip sockets are opened for ioctl and not
++                 bind/connect, so we'll fake a bind learn log */
++              if (type == SOCK_RAW || type == SOCK_PACKET) {
++                      __u32 fakeip = 0;
++                      security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                                     current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                                     GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                                     gr_to_filename(current->exec_file->f_path.dentry,
++                                     current->exec_file->f_path.mnt) :
++                                     curr->filename, curr->filename,
++                                     &fakeip, 0, type,
++                                     protocol, GR_CONNECT, &current->signal->saved_ip);
++              } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++                      __u32 fakeip = 0;
++                      security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                                     current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                                     GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                                     gr_to_filename(current->exec_file->f_path.dentry,
++                                     current->exec_file->f_path.mnt) :
++                                     curr->filename, curr->filename,
++                                     &fakeip, 0, type,
++                                     protocol, GR_BIND, &current->signal->saved_ip);
++              }
++              /* we'll log when they use connect or bind */
++              goto exit;
++      }
++
++exit_fail:
++      if (domain == PF_INET)
++              gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), 
++                          gr_socktype_to_name(type), gr_proto_to_name(protocol));
++      else if (rcu_access_pointer(net_families[domain]) != NULL)
++              gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), 
++                          gr_socktype_to_name(type), protocol);
++
++      return 0;
++exit:
++      return 1;
++}
++
++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
++{
++      if ((ip->mode & mode) &&
++          (ip_port >= ip->low) &&
++          (ip_port <= ip->high) &&
++          ((ntohl(ip_addr) & our_netmask) ==
++           (ntohl(our_addr) & our_netmask))
++          && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
++          && (ip->type & (1U << type))) {
++              if (ip->mode & GR_INVERT)
++                      return 2; // specifically denied
++              else
++                      return 1; // allowed
++      }
++
++      return 0; // not specifically allowed, may continue parsing
++}
++
++static int
++gr_search_connectbind(const int full_mode, struct sock *sk,
++                    struct sockaddr_in *addr, const int type)
++{
++      char iface[IFNAMSIZ] = {0};
++      struct acl_subject_label *curr;
++      struct acl_ip_label *ip;
++      struct inet_sock *isk;
++      struct net_device *dev;
++      struct in_device *idev;
++      unsigned long i;
++      int ret;
++      int mode = full_mode & (GR_BIND | GR_CONNECT);
++      __u32 ip_addr = 0;
++      __u32 our_addr;
++      __u32 our_netmask;
++      char *p;
++      __u16 ip_port = 0;
++      const struct cred *cred = current_cred();
++
++      if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++              return 0;
++
++      curr = current->acl;
++      isk = inet_sk(sk);
++
++      /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
++      if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
++              addr->sin_addr.s_addr = curr->inaddr_any_override;
++      if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
++              struct sockaddr_in saddr;
++              int err;
++
++              saddr.sin_family = AF_INET;
++              saddr.sin_addr.s_addr = curr->inaddr_any_override;
++              saddr.sin_port = isk->inet_sport;
++
++              err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++              if (err)
++                      return err;
++
++              err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++              if (err)
++                      return err;
++      }
++
++      if (!curr->ips)
++              return 0;
++
++      ip_addr = addr->sin_addr.s_addr;
++      ip_port = ntohs(addr->sin_port);
++
++      if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++              security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++                             current->role->roletype, GR_GLOBAL_UID(cred->uid),
++                             GR_GLOBAL_GID(cred->gid), current->exec_file ?
++                             gr_to_filename(current->exec_file->f_path.dentry,
++                             current->exec_file->f_path.mnt) :
++                             curr->filename, curr->filename,
++                             &ip_addr, ip_port, type,
++                             sk->sk_protocol, mode, &current->signal->saved_ip);
++              return 0;
++      }
++
++      for (i = 0; i < curr->ip_num; i++) {
++              ip = *(curr->ips + i);
++              if (ip->iface != NULL) {
++                      strncpy(iface, ip->iface, IFNAMSIZ - 1);
++                      p = strchr(iface, ':');
++                      if (p != NULL)
++                              *p = '\0';
++                      dev = dev_get_by_name(sock_net(sk), iface);
++                      if (dev == NULL)
++                              continue;
++                      idev = in_dev_get(dev);
++                      if (idev == NULL) {
++                              dev_put(dev);
++                              continue;
++                      }
++                      rcu_read_lock();
++                      for_ifa(idev) {
++                              if (!strcmp(ip->iface, ifa->ifa_label)) {
++                                      our_addr = ifa->ifa_address;
++                                      our_netmask = 0xffffffff;
++                                      ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++                                      if (ret == 1) {
++                                              rcu_read_unlock();
++                                              in_dev_put(idev);
++                                              dev_put(dev);
++                                              return 0;
++                                      } else if (ret == 2) {
++                                              rcu_read_unlock();
++                                              in_dev_put(idev);
++                                              dev_put(dev);
++                                              goto denied;
++                                      }
++                              }
++                      } endfor_ifa(idev);
++                      rcu_read_unlock();
++                      in_dev_put(idev);
++                      dev_put(dev);
++              } else {
++                      our_addr = ip->addr;
++                      our_netmask = ip->netmask;
++                      ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++                      if (ret == 1)
++                              return 0;
++                      else if (ret == 2)
++                              goto denied;
++              }
++      }
++
++denied:
++      if (mode == GR_BIND)
++              gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++      else if (mode == GR_CONNECT)
++              gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++
++      return -EACCES;
++}
++
++int
++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
++{
++      /* always allow disconnection of dgram sockets with connect */
++      if (addr->sin_family == AF_UNSPEC)
++              return 0;
++      return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
++{
++      return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int gr_search_listen(struct socket *sock)
++{
++      struct sock *sk = sock->sk;
++      struct sockaddr_in addr;
++
++      addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++      addr.sin_port = inet_sk(sk)->inet_sport;
++
++      return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int gr_search_accept(struct socket *sock)
++{
++      struct sock *sk = sock->sk;
++      struct sockaddr_in addr;
++
++      addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++      addr.sin_port = inet_sk(sk)->inet_sport;
++
++      return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++      if (addr)
++              return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++      else {
++              struct sockaddr_in sin;
++              const struct inet_sock *inet = inet_sk(sk);
++
++              sin.sin_addr.s_addr = inet->inet_daddr;
++              sin.sin_port = inet->inet_dport;
++
++              return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++      }
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++      struct sockaddr_in sin;
++
++      if (unlikely(skb->len < sizeof (struct udphdr)))
++              return 0;       // skip this packet
++
++      sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
++      sin.sin_port = udp_hdr(skb)->source;
++
++      return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++}
+diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
+new file mode 100644
+index 0000000..c5abda5
+--- /dev/null
++++ b/grsecurity/gracl_learn.c
+@@ -0,0 +1,209 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
++                                 size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static DEFINE_SPINLOCK(gr_learn_lock);
++static DEFINE_MUTEX(gr_learn_user_mutex);
++
++/* we need to maintain two buffers, so that the kernel context of grlearn
++   uses a semaphore around the userspace copying, and the other kernel contexts
++   use a spinlock when copying into the buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
++static ssize_t
++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
++{
++      DECLARE_WAITQUEUE(wait, current);
++      ssize_t retval = 0;
++
++      add_wait_queue(&learn_wait, &wait);
++      do {
++              mutex_lock(&gr_learn_user_mutex);
++              set_current_state(TASK_INTERRUPTIBLE);
++              spin_lock(&gr_learn_lock);
++              if (learn_buffer_len) {
++                      set_current_state(TASK_RUNNING);
++                      break;
++              }
++              spin_unlock(&gr_learn_lock);
++              mutex_unlock(&gr_learn_user_mutex);
++              if (file->f_flags & O_NONBLOCK) {
++                      retval = -EAGAIN;
++                      goto out;
++              }
++              if (signal_pending(current)) {
++                      retval = -ERESTARTSYS;
++                      goto out;
++              }
++
++              schedule();
++      } while (1);
++
++      memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++      learn_buffer_user_len = learn_buffer_len;
++      retval = learn_buffer_len;
++      learn_buffer_len = 0;
++
++      spin_unlock(&gr_learn_lock);
++
++      if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++              retval = -EFAULT;
++
++      mutex_unlock(&gr_learn_user_mutex);
++out:
++      set_current_state(TASK_RUNNING);
++      remove_wait_queue(&learn_wait, &wait);
++      return retval;
++}
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++      poll_wait(file, &learn_wait, wait);
++
++      if (learn_buffer_len)
++              return (POLLIN | POLLRDNORM);
++
++      return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++      char *tmp;
++
++      mutex_lock(&gr_learn_user_mutex);
++      spin_lock(&gr_learn_lock);
++      tmp = learn_buffer;
++      learn_buffer = NULL;
++      spin_unlock(&gr_learn_lock);
++      if (tmp)
++              vfree(tmp);
++      if (learn_buffer_user != NULL) {
++              vfree(learn_buffer_user);
++              learn_buffer_user = NULL;
++      }
++      learn_buffer_len = 0;
++      mutex_unlock(&gr_learn_user_mutex);
++
++      return;
++}
++
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++      va_list args;
++      unsigned int len;
++
++      if (!gr_learn_attached)
++              return;
++
++      spin_lock(&gr_learn_lock);
++
++      /* leave a gap at the end so we know when it's "full" but don't have to
++         compute the exact length of the string we're trying to append
++      */
++      if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++              spin_unlock(&gr_learn_lock);
++              wake_up_interruptible(&learn_wait);
++              return;
++      }
++      if (learn_buffer == NULL) {
++              spin_unlock(&gr_learn_lock);
++              return;
++      }
++
++      va_start(args, fmt);
++      len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++      va_end(args);
++
++      learn_buffer_len += len + 1;
++
++      spin_unlock(&gr_learn_lock);
++      wake_up_interruptible(&learn_wait);
++
++      return;
++}
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++      if (file->f_mode & FMODE_READ && gr_learn_attached)
++              return -EBUSY;
++      if (file->f_mode & FMODE_READ) {
++              int retval = 0;
++              mutex_lock(&gr_learn_user_mutex);
++              if (learn_buffer == NULL)
++                      learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++              if (learn_buffer_user == NULL)
++                      learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++              if (learn_buffer == NULL) {
++                      retval = -ENOMEM;
++                      goto out_error;
++              }
++              if (learn_buffer_user == NULL) {
++                      retval = -ENOMEM;
++                      goto out_error;
++              }
++              learn_buffer_len = 0;
++              learn_buffer_user_len = 0;
++              gr_learn_attached = 1;
++out_error:
++              mutex_unlock(&gr_learn_user_mutex);
++              return retval;
++      }
++      return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++      if (file->f_mode & FMODE_READ) {
++              char *tmp = NULL;
++              mutex_lock(&gr_learn_user_mutex);
++              spin_lock(&gr_learn_lock);
++              tmp = learn_buffer;
++              learn_buffer = NULL;
++              spin_unlock(&gr_learn_lock);
++              if (tmp)
++                      vfree(tmp);
++              if (learn_buffer_user != NULL) {
++                      vfree(learn_buffer_user);
++                      learn_buffer_user = NULL;
++              }
++              learn_buffer_len = 0;
++              learn_buffer_user_len = 0;
++              gr_learn_attached = 0;
++              mutex_unlock(&gr_learn_user_mutex);
++      }
++
++      return 0;
++}
++              
++const struct file_operations grsec_fops = {
++      .read           = read_learn,
++      .write          = write_grsec_handler,
++      .open           = open_learn,
++      .release        = close_learn,
++      .poll           = poll_learn,
++};
+diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
+new file mode 100644
+index 0000000..d943ba9
+--- /dev/null
++++ b/grsecurity/gracl_policy.c
+@@ -0,0 +1,1784 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/lglock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/security.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/stop_machine.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++#include <linux/lglock.h>
++#include <linux/hugetlb.h>
++#include <linux/posix-timers.h>
++#include "../fs/mount.h"
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++extern struct gr_policy_state *polstate;
++
++#define FOR_EACH_ROLE_START(role) \
++      role = polstate->role_list; \
++      while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++              role = role->prev; \
++      }
++
++struct path gr_real_root;
++
++extern struct gr_alloc_state *current_alloc_state;
++
++u16 acl_sp_role_value;
++
++static DEFINE_MUTEX(gr_dev_mutex);
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++struct gr_arg *gr_usermode __read_only;
++unsigned char *gr_system_salt __read_only;
++unsigned char *gr_system_sum __read_only;
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++struct acl_object_label *fakefs_obj_rw;
++struct acl_object_label *fakefs_obj_rwx;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern int gr_find_and_remove_uid(uid_t uid);
++
++extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
++extern void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
++extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
++extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
++extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
++extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
++extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
++extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
++extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
++extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
++extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
++extern void assign_special_role(const char *rolename);
++extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
++extern int gr_rbac_disable(void *unused);
++extern void gr_enable_rbac_system(void);
++
++static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++      if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++      if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++      if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++      if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++      if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++      if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++      if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
++{
++      if (copy_from_user(trans, userp, sizeof(struct role_transition)))
++              return -EFAULT;
++
++      return 0;
++}
++
++int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
++{
++      if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
++{
++      if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
++              return -EFAULT;
++
++      if ((uwrap->version != GRSECURITY_VERSION) ||
++          (uwrap->size != sizeof(struct gr_arg)))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++      if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static size_t get_gr_arg_wrapper_size_normal(void)
++{
++      return sizeof(struct gr_arg_wrapper);
++}
++
++#ifdef CONFIG_COMPAT
++extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
++extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
++extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
++extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
++extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
++extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
++extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
++extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
++extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
++extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
++extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
++extern size_t get_gr_arg_wrapper_size_compat(void);
++
++int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
++int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
++int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
++int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
++int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
++int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
++int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
++int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
++int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
++int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
++int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
++size_t (* get_gr_arg_wrapper_size)(void) __read_only;
++
++#else
++#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
++#define copy_gr_arg copy_gr_arg_normal
++#define copy_gr_hash_struct copy_gr_hash_struct_normal
++#define copy_acl_object_label copy_acl_object_label_normal
++#define copy_acl_subject_label copy_acl_subject_label_normal
++#define copy_acl_role_label copy_acl_role_label_normal
++#define copy_acl_ip_label copy_acl_ip_label_normal
++#define copy_pointer_from_array copy_pointer_from_array_normal
++#define copy_sprole_pw copy_sprole_pw_normal
++#define copy_role_transition copy_role_transition_normal
++#define copy_role_allowed_ip copy_role_allowed_ip_normal
++#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
++#endif
++
++static struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++      unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
++      struct subject_map *match;
++
++      match = polstate->subj_map_set.s_hash[index];
++
++      while (match && match->user != userp)
++              match = match->next;
++
++      if (match != NULL)
++              return match->kernel;
++      else
++              return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++      unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
++      struct subject_map **curr;
++
++      subjmap->prev = NULL;
++
++      curr = &polstate->subj_map_set.s_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = subjmap;
++
++      subjmap->next = *curr;
++      *curr = subjmap;
++
++      return;
++}
++
++static void
++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
++{
++      unsigned int index =
++          gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
++      struct acl_role_label **curr;
++      struct acl_role_label *tmp, *tmp2;
++
++      curr = &polstate->acl_role_set.r_hash[index];
++
++      /* simple case, slot is empty, just set it to our role */
++      if (*curr == NULL) {
++              *curr = role;
++      } else {
++              /* example:
++                 1 -> 2 -> 3 (adding 2 -> 3 to here)
++                 2 -> 3
++              */
++              /* first check to see if we can already be reached via this slot */
++              tmp = *curr;
++              while (tmp && tmp != role)
++                      tmp = tmp->next;
++              if (tmp == role) {
++                      /* we don't need to add ourselves to this slot's chain */
++                      return;
++              }
++              /* we need to add ourselves to this chain, two cases */
++              if (role->next == NULL) {
++                      /* simple case, append the current chain to our role */
++                      role->next = *curr;
++                      *curr = role;
++              } else {
++                      /* 1 -> 2 -> 3 -> 4
++                         2 -> 3 -> 4
++                         3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
++                      */                         
++                      /* trickier case: walk our role's chain until we find
++                         the role for the start of the current slot's chain */
++                      tmp = role;
++                      tmp2 = *curr;
++                      while (tmp->next && tmp->next != tmp2)
++                              tmp = tmp->next;
++                      if (tmp->next == tmp2) {
++                              /* from example above, we found 3, so just
++                                 replace this slot's chain with ours */
++                              *curr = role;
++                      } else {
++                              /* we didn't find a subset of our role's chain
++                                 in the current slot's chain, so append their
++                                 chain to ours, and set us as the first role in
++                                 the slot's chain
++
++                                 we could fold this case with the case above,
++                                 but making it explicit for clarity
++                              */
++                              tmp->next = tmp2;
++                              *curr = role;
++                      }
++              }
++      }
++
++      return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++      int i;
++
++      if (polstate->role_list == NULL) {
++              polstate->role_list = role;
++              role->prev = NULL;
++      } else {
++              role->prev = polstate->role_list;
++              polstate->role_list = role;
++      }
++      
++      /* used for hash chains */
++      role->next = NULL;
++
++      if (role->roletype & GR_ROLE_DOMAIN) {
++              for (i = 0; i < role->domain_child_num; i++)
++                      __insert_acl_role_label(role, role->domain_children[i]);
++      } else
++              __insert_acl_role_label(role, role->uidgid);
++}
++                                      
++static int
++insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
++{
++      struct name_entry **curr, *nentry;
++      struct inodev_entry *ientry;
++      unsigned int len = strlen(name);
++      unsigned int key = full_name_hash(NULL, (const unsigned char *)name, len);
++      unsigned int index = key % polstate->name_set.n_size;
++
++      curr = &polstate->name_set.n_hash[index];
++
++      while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
++              curr = &((*curr)->next);
++
++      if (*curr != NULL)
++              return 1;
++
++      nentry = acl_alloc(sizeof (struct name_entry));
++      if (nentry == NULL)
++              return 0;
++      ientry = acl_alloc(sizeof (struct inodev_entry));
++      if (ientry == NULL)
++              return 0;
++      ientry->nentry = nentry;
++
++      nentry->key = key;
++      nentry->name = name;
++      nentry->inode = inode;
++      nentry->device = device;
++      nentry->len = len;
++      nentry->deleted = deleted;
++
++      nentry->prev = NULL;
++      curr = &polstate->name_set.n_hash[index];
++      if (*curr != NULL)
++              (*curr)->prev = nentry;
++      nentry->next = *curr;
++      *curr = nentry;
++
++      /* insert us into the table searchable by inode/dev */
++      __insert_inodev_entry(polstate, ientry);
++
++      return 1;
++}
++
++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
++
++static void *
++create_table(__u32 * len, int elementsize)
++{
++      unsigned int table_sizes[] = {
++              7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++              32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++              4194301, 8388593, 16777213, 33554393, 67108859
++      };
++      void *newtable = NULL;
++      unsigned int pwr = 0;
++
++      while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++             table_sizes[pwr] <= *len)
++              pwr++;
++
++      if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
++              return newtable;
++
++      if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
++              newtable =
++                  kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
++      else
++              newtable = vmalloc(table_sizes[pwr] * elementsize);
++
++      *len = table_sizes[pwr];
++
++      return newtable;
++}
++
++static int
++init_variables(const struct gr_arg *arg, bool reload)
++{
++      struct task_struct *reaper = init_pid_ns.child_reaper;
++      unsigned int stacksize;
++
++      polstate->subj_map_set.s_size = arg->role_db.num_subjects;
++      polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
++      polstate->name_set.n_size = arg->role_db.num_objects;
++      polstate->inodev_set.i_size = arg->role_db.num_objects;
++
++      if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
++          !polstate->name_set.n_size || !polstate->inodev_set.i_size)
++              return 1;
++
++      if (!reload) {
++              if (!gr_init_uidset())
++                      return 1;
++      }
++
++      /* set up the stack that holds allocation info */
++
++      stacksize = arg->role_db.num_pointers + 5;
++
++      if (!acl_alloc_stack_init(stacksize))
++              return 1;
++
++      if (!reload) {
++              /* grab reference for the real root dentry and vfsmount */
++              get_fs_root(reaper->fs, &gr_real_root);
++      
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++      printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", gr_get_dev_from_dentry(gr_real_root.dentry), gr_get_ino_from_dentry(gr_real_root.dentry));
++#endif
++
++              fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
++              if (fakefs_obj_rw == NULL)
++                      return 1;
++              fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
++      
++              fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
++              if (fakefs_obj_rwx == NULL)
++                      return 1;
++              fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
++      }
++
++      polstate->subj_map_set.s_hash =
++          (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
++      polstate->acl_role_set.r_hash =
++          (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
++      polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
++      polstate->inodev_set.i_hash =
++          (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
++
++      if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
++          !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
++              return 1;
++
++      memset(polstate->subj_map_set.s_hash, 0,
++             sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
++      memset(polstate->acl_role_set.r_hash, 0,
++             sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
++      memset(polstate->name_set.n_hash, 0,
++             sizeof (struct name_entry *) * polstate->name_set.n_size);
++      memset(polstate->inodev_set.i_hash, 0,
++             sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
++
++      return 0;
++}
++
++/* free information not needed after startup
++   currently contains user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++      __u32 i;
++
++      if (polstate->subj_map_set.s_hash) {
++              for (i = 0; i < polstate->subj_map_set.s_size; i++) {
++                      if (polstate->subj_map_set.s_hash[i]) {
++                              kfree(polstate->subj_map_set.s_hash[i]);
++                              polstate->subj_map_set.s_hash[i] = NULL;
++                      }
++              }
++
++              if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
++                  PAGE_SIZE)
++                      kfree(polstate->subj_map_set.s_hash);
++              else
++                      vfree(polstate->subj_map_set.s_hash);
++      }
++
++      return;
++}
++
++static void
++free_variables(bool reload)
++{
++      struct acl_subject_label *s;
++      struct acl_role_label *r;
++      struct task_struct *task, *task2;
++      unsigned int x;
++
++      if (!reload) {
++              gr_clear_learn_entries();
++
++              read_lock(&tasklist_lock);
++              do_each_thread(task2, task) {
++                      task->acl_sp_role = 0;
++                      task->acl_role_id = 0;
++                      task->inherited = 0;
++                      task->acl = NULL;
++                      task->role = NULL;
++              } while_each_thread(task2, task);
++              read_unlock(&tasklist_lock);
++
++              kfree(fakefs_obj_rw);
++              fakefs_obj_rw = NULL;
++              kfree(fakefs_obj_rwx);
++              fakefs_obj_rwx = NULL;
++
++              /* release the reference to the real root dentry and vfsmount */
++              path_put(&gr_real_root);
++              memset(&gr_real_root, 0, sizeof(gr_real_root));
++      }
++
++      /* free all object hash tables */
++
++      FOR_EACH_ROLE_START(r)
++              if (r->subj_hash == NULL)
++                      goto next_role;
++              FOR_EACH_SUBJECT_START(r, s, x)
++                      if (s->obj_hash == NULL)
++                              break;
++                      if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++                              kfree(s->obj_hash);
++                      else
++                              vfree(s->obj_hash);
++              FOR_EACH_SUBJECT_END(s, x)
++              FOR_EACH_NESTED_SUBJECT_START(r, s)
++                      if (s->obj_hash == NULL)
++                              break;
++                      if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++                              kfree(s->obj_hash);
++                      else
++                              vfree(s->obj_hash);
++              FOR_EACH_NESTED_SUBJECT_END(s)
++              if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++                      kfree(r->subj_hash);
++              else
++                      vfree(r->subj_hash);
++              r->subj_hash = NULL;
++next_role:
++      FOR_EACH_ROLE_END(r)
++
++      acl_free_all();
++
++      if (polstate->acl_role_set.r_hash) {
++              if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++                  PAGE_SIZE)
++                      kfree(polstate->acl_role_set.r_hash);
++              else
++                      vfree(polstate->acl_role_set.r_hash);
++      }
++      if (polstate->name_set.n_hash) {
++              if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
++                  PAGE_SIZE)
++                      kfree(polstate->name_set.n_hash);
++              else
++                      vfree(polstate->name_set.n_hash);
++      }
++
++      if (polstate->inodev_set.i_hash) {
++              if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
++                  PAGE_SIZE)
++                      kfree(polstate->inodev_set.i_hash);
++              else
++                      vfree(polstate->inodev_set.i_hash);
++      }
++
++      if (!reload)
++              gr_free_uidset();
++
++      memset(&polstate->name_set, 0, sizeof (struct name_db));
++      memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
++      memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
++      memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++      polstate->default_role = NULL;
++      polstate->kernel_role = NULL;
++      polstate->role_list = NULL;
++
++      return;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++
++static int alloc_and_copy_string(char **name, unsigned int maxlen)
++{
++      unsigned int len = strnlen_user(*name, maxlen);
++      char *tmp;
++
++      if (!len || len >= maxlen)
++              return -EINVAL;
++
++      if ((tmp = (char *) acl_alloc(len)) == NULL)
++              return -ENOMEM;
++
++      if (copy_from_user(tmp, *name, len))
++              return -EFAULT;
++
++      tmp[len-1] = '\0';
++      *name = tmp;
++
++      return 0;
++}
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++      struct acl_object_label *g_tmp, **guser;
++      int error;
++
++      if (obj->globbed == NULL)
++              return 0;
++
++      guser = &obj->globbed;
++      while (*guser) {
++              g_tmp = (struct acl_object_label *)
++                      acl_alloc(sizeof (struct acl_object_label));
++              if (g_tmp == NULL)
++                      return -ENOMEM;
++
++              if (copy_acl_object_label(g_tmp, *guser))
++                      return -EFAULT;
++
++              error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
++              if (error)
++                      return error;
++
++              *guser = g_tmp;
++              guser = &(g_tmp->next);
++      }
++
++      return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++             struct acl_role_label *role)
++{
++      struct acl_object_label *o_tmp;
++      int ret;
++
++      while (userp) {
++              if ((o_tmp = (struct acl_object_label *)
++                   acl_alloc(sizeof (struct acl_object_label))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_acl_object_label(o_tmp, userp))
++                      return -EFAULT;
++
++              userp = o_tmp->prev;
++
++              ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
++              if (ret)
++                      return ret;
++
++              insert_acl_obj_label(o_tmp, subj);
++              if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++                                     o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
++                      return -ENOMEM;
++
++              ret = copy_user_glob(o_tmp);
++              if (ret)
++                      return ret;
++
++              if (o_tmp->nested) {
++                      int already_copied;
++
++                      o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
++                      if (IS_ERR(o_tmp->nested))
++                              return PTR_ERR(o_tmp->nested);
++
++                      /* insert into nested subject list if we haven't copied this one yet
++                         to prevent duplicate entries */
++                      if (!already_copied) {
++                              o_tmp->nested->next = role->hash->first;
++                              role->hash->first = o_tmp->nested;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++      struct acl_subject_label s_tmp;
++      __u32 num = 0;
++
++      while (userp) {
++              if (copy_acl_subject_label(&s_tmp, userp))
++                      break;
++
++              userp = s_tmp.prev;
++      }
++
++      return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++      struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++      ruserip = rolep->allowed_ips;
++
++      while (ruserip) {
++              rlast = rtmp;
++
++              if ((rtmp = (struct role_allowed_ip *)
++                   acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_role_allowed_ip(rtmp, ruserip))
++                      return -EFAULT;
++
++              ruserip = rtmp->prev;
++
++              if (!rlast) {
++                      rtmp->prev = NULL;
++                      rolep->allowed_ips = rtmp;
++              } else {
++                      rlast->next = rtmp;
++                      rtmp->prev = rlast;
++              }
++
++              if (!ruserip)
++                      rtmp->next = NULL;
++      }
++
++      return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++      struct role_transition *rusertp, *rtmp = NULL, *rlast;
++      int error;
++
++      rusertp = rolep->transitions;
++
++      while (rusertp) {
++              rlast = rtmp;
++
++              if ((rtmp = (struct role_transition *)
++                   acl_alloc(sizeof (struct role_transition))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_role_transition(rtmp, rusertp))
++                      return -EFAULT;
++
++              rusertp = rtmp->prev;
++
++              error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
++              if (error)
++                      return error;
++
++              if (!rlast) {
++                      rtmp->prev = NULL;
++                      rolep->transitions = rtmp;
++              } else {
++                      rlast->next = rtmp;
++                      rtmp->prev = rlast;
++              }
++
++              if (!rusertp)
++                      rtmp->next = NULL;
++      }
++
++      return 0;
++}
++
++static __u32 count_user_objs(const struct acl_object_label __user *userp)
++{
++      struct acl_object_label o_tmp;
++      __u32 num = 0;
++
++      while (userp) {
++              if (copy_acl_object_label(&o_tmp, userp))
++                      break;
++
++              userp = o_tmp.prev;
++              num++;
++      }
++
++      return num;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
++{
++      struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++      __u32 num_objs;
++      struct acl_ip_label **i_tmp, *i_utmp2;
++      struct gr_hash_struct ghash;
++      struct subject_map *subjmap;
++      unsigned int i_num;
++      int err;
++
++      if (already_copied != NULL)
++              *already_copied = 0;
++
++      s_tmp = lookup_subject_map(userp);
++
++      /* we've already copied this subject into the kernel, just return
++         the reference to it, and don't copy it over again
++      */
++      if (s_tmp) {
++              if (already_copied != NULL)
++                      *already_copied = 1;
++              return(s_tmp);
++      }
++
++      if ((s_tmp = (struct acl_subject_label *)
++          acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++              return ERR_PTR(-ENOMEM);
++
++      subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++      if (subjmap == NULL)
++              return ERR_PTR(-ENOMEM);
++
++      subjmap->user = userp;
++      subjmap->kernel = s_tmp;
++      insert_subj_map_entry(subjmap);
++
++      if (copy_acl_subject_label(s_tmp, userp))
++              return ERR_PTR(-EFAULT);
++
++      err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
++      if (err)
++              return ERR_PTR(err);
++
++      if (!strcmp(s_tmp->filename, "/"))
++              role->root_label = s_tmp;
++
++      if (copy_gr_hash_struct(&ghash, s_tmp->hash))
++              return ERR_PTR(-EFAULT);
++
++      /* copy user and group transition tables */
++
++      if (s_tmp->user_trans_num) {
++              uid_t *uidlist;
++
++              uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
++              if (uidlist == NULL)
++                      return ERR_PTR(-ENOMEM);
++              if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++                      return ERR_PTR(-EFAULT);
++
++              s_tmp->user_transitions = uidlist;
++      }
++
++      if (s_tmp->group_trans_num) {
++              gid_t *gidlist;
++
++              gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
++              if (gidlist == NULL)
++                      return ERR_PTR(-ENOMEM);
++              if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++                      return ERR_PTR(-EFAULT);
++
++              s_tmp->group_transitions = gidlist;
++      }
++
++      /* set up object hash table */
++      num_objs = count_user_objs(ghash.first);
++
++      s_tmp->obj_hash_size = num_objs;
++      s_tmp->obj_hash =
++          (struct acl_object_label **)
++          create_table(&(s_tmp->obj_hash_size), sizeof(void *));
++
++      if (!s_tmp->obj_hash)
++              return ERR_PTR(-ENOMEM);
++
++      memset(s_tmp->obj_hash, 0,
++             s_tmp->obj_hash_size *
++             sizeof (struct acl_object_label *));
++
++      /* add in objects */
++      err = copy_user_objs(ghash.first, s_tmp, role);
++
++      if (err)
++              return ERR_PTR(err);
++
++      /* set pointer for parent subject */
++      if (s_tmp->parent_subject) {
++              s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
++
++              if (IS_ERR(s_tmp2))
++                      return s_tmp2;
++
++              s_tmp->parent_subject = s_tmp2;
++      }
++
++      /* add in ip acls */
++
++      if (!s_tmp->ip_num) {
++              s_tmp->ips = NULL;
++              goto insert;
++      }
++
++      i_tmp =
++          (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
++                                             sizeof (struct acl_ip_label *));
++
++      if (!i_tmp)
++              return ERR_PTR(-ENOMEM);
++
++      for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++              *(i_tmp + i_num) =
++                  (struct acl_ip_label *)
++                  acl_alloc(sizeof (struct acl_ip_label));
++              if (!*(i_tmp + i_num))
++                      return ERR_PTR(-ENOMEM);
++
++              if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
++                      return ERR_PTR(-EFAULT);
++
++              if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
++                      return ERR_PTR(-EFAULT);
++              
++              if ((*(i_tmp + i_num))->iface == NULL)
++                      continue;
++
++              err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
++              if (err)
++                      return ERR_PTR(err);
++      }
++
++      s_tmp->ips = i_tmp;
++
++insert:
++      if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++                             s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
++              return ERR_PTR(-ENOMEM);
++
++      return s_tmp;
++}
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++      struct acl_subject_label s_pre;
++      struct acl_subject_label * ret;
++      int err;
++
++      while (userp) {
++              if (copy_acl_subject_label(&s_pre, userp))
++                      return -EFAULT;
++              
++              ret = do_copy_user_subj(userp, role, NULL);
++
++              err = PTR_ERR(ret);
++              if (IS_ERR(ret))
++                      return err;
++
++              insert_acl_subj_label(ret, role);
++
++              userp = s_pre.prev;
++      }
++
++      return 0;
++}
++
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++      struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++      struct acl_subject_label *subj_list;
++      struct sprole_pw *sptmp;
++      struct gr_hash_struct *ghash;
++      uid_t *domainlist;
++      unsigned int r_num;
++      int err = 0;
++      __u16 i;
++      __u32 num_subjs;
++
++      /* we need a default and kernel role */
++      if (arg->role_db.num_roles < 2)
++              return -EINVAL;
++
++      /* copy special role authentication info from userspace */
++
++      polstate->num_sprole_pws = arg->num_sprole_pws;
++      polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
++
++      if (!polstate->acl_special_roles && polstate->num_sprole_pws)
++              return -ENOMEM;
++
++      for (i = 0; i < polstate->num_sprole_pws; i++) {
++              sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++              if (!sptmp)
++                      return -ENOMEM;
++              if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
++                      return -EFAULT;
++
++              err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
++              if (err)
++                      return err;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++              printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
++#endif
++
++              polstate->acl_special_roles[i] = sptmp;
++      }
++
++      r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++      for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
++              r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++              if (!r_tmp)
++                      return -ENOMEM;
++
++              if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
++                      return -EFAULT;
++
++              if (copy_acl_role_label(r_tmp, r_utmp2))
++                      return -EFAULT;
++
++              err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
++              if (err)
++                      return err;
++
++              if (!strcmp(r_tmp->rolename, "default")
++                  && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++                      polstate->default_role = r_tmp;
++              } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++                      polstate->kernel_role = r_tmp;
++              }
++
++              if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
++                      return -ENOMEM;
++
++              if (copy_gr_hash_struct(ghash, r_tmp->hash))
++                      return -EFAULT;
++
++              r_tmp->hash = ghash;
++
++              num_subjs = count_user_subjs(r_tmp->hash->first);
++
++              r_tmp->subj_hash_size = num_subjs;
++              r_tmp->subj_hash =
++                  (struct acl_subject_label **)
++                  create_table(&(r_tmp->subj_hash_size), sizeof(void *));
++
++              if (!r_tmp->subj_hash)
++                      return -ENOMEM;
++
++              err = copy_user_allowedips(r_tmp);
++              if (err)
++                      return err;
++
++              /* copy domain info */
++              if (r_tmp->domain_children != NULL) {
++                      domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
++                      if (domainlist == NULL)
++                              return -ENOMEM;
++
++                      if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
++                              return -EFAULT;
++
++                      r_tmp->domain_children = domainlist;
++              }
++
++              err = copy_user_transitions(r_tmp);
++              if (err)
++                      return err;
++
++              memset(r_tmp->subj_hash, 0,
++                     r_tmp->subj_hash_size *
++                     sizeof (struct acl_subject_label *));
++
++              /* acquire the list of subjects, then NULL out
++                 the list prior to parsing the subjects for this role,
++                 as during this parsing the list is replaced with a list
++                 of *nested* subjects for the role
++              */
++              subj_list = r_tmp->hash->first;
++
++              /* set nested subject list to null */
++              r_tmp->hash->first = NULL;
++
++              err = copy_user_subjs(subj_list, r_tmp);
++
++              if (err)
++                      return err;
++
++              insert_acl_role_label(r_tmp);
++      }
++
++      if (polstate->default_role == NULL || polstate->kernel_role == NULL)
++              return -EINVAL;
++
++      return err;
++}
++
++static int gracl_reload_apply_policies(void *reload)
++{
++      struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
++      struct task_struct *task, *task2;
++      struct acl_role_label *role, *rtmp;
++      struct acl_subject_label *subj;
++      const struct cred *cred;
++      int role_applied;
++      int ret = 0;
++
++      memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
++      memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
++
++      /* first make sure we'll be able to apply the new policy cleanly */
++      do_each_thread(task2, task) {
++              if (task->exec_file == NULL)
++                      continue;
++              role_applied = 0;
++              if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
++                      /* preserve special roles */
++                      FOR_EACH_ROLE_START(role)
++                              if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
++                                      rtmp = task->role;
++                                      task->role = role;
++                                      role_applied = 1;
++                                      break;
++                              }
++                      FOR_EACH_ROLE_END(role)
++              }
++              if (!role_applied) {
++                      cred = __task_cred(task);
++                      rtmp = task->role;
++                      task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
++              }
++              /* this handles non-nested inherited subjects, nested subjects will still
++                 be dropped currently */
++              subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
++              task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
++              /* change the role back so that we've made no modifications to the policy */
++              task->role = rtmp;
++
++              if (subj == NULL || task->tmpacl == NULL) {
++                      ret = -EINVAL;
++                      goto out;
++              }
++      } while_each_thread(task2, task);
++
++      /* now actually apply the policy */
++
++      do_each_thread(task2, task) {
++              if (task->exec_file) {
++                      role_applied = 0;
++                      if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
++                              /* preserve special roles */
++                              FOR_EACH_ROLE_START(role)
++                                      if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
++                                              task->role = role;
++                                              role_applied = 1;
++                                              break;
++                                      }
++                              FOR_EACH_ROLE_END(role)
++                      }
++                      if (!role_applied) {
++                              cred = __task_cred(task);
++                              task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
++                      }
++                      /* this handles non-nested inherited subjects, nested subjects will still
++                         be dropped currently */
++                      if (!reload_state->oldmode && task->inherited)
++                              subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
++                      else {
++                              /* looked up and tagged to the task previously */
++                              subj = task->tmpacl;
++                      }
++                      /* subj will be non-null */
++                      __gr_apply_subject_to_task(polstate, task, subj);
++                      if (reload_state->oldmode) {
++                              task->acl_role_id = 0;
++                              task->acl_sp_role = 0;
++                              task->inherited = 0;
++                      }
++              } else {
++                      // it's a kernel process
++                      task->role = polstate->kernel_role;
++                      task->acl = polstate->kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++                      task->acl->mode &= ~GR_PROCFIND;
++#endif
++              }
++      } while_each_thread(task2, task);
++
++      memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
++      memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
++
++out:
++
++      return ret;
++}
++
++static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
++{
++      struct gr_reload_state new_reload_state = { };
++      int err;
++
++      new_reload_state.oldpolicy_ptr = polstate;
++      new_reload_state.oldalloc_ptr = current_alloc_state;
++      new_reload_state.oldmode = oldmode;
++
++      current_alloc_state = &new_reload_state.newalloc;
++      polstate = &new_reload_state.newpolicy;
++
++      /* everything relevant is now saved off, copy in the new policy */
++      if (init_variables(args, true)) {
++              gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++              err = -ENOMEM;
++              goto error;
++      }
++
++      err = copy_user_acl(args);
++      free_init_variables();
++      if (err)
++              goto error;
++      /* the new policy is copied in, with the old policy available via saved_state
++         first go through applying roles, making sure to preserve special roles
++         then apply new subjects, making sure to preserve inherited and nested subjects,
++         though currently only inherited subjects will be preserved
++      */
++      err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
++      if (err)
++              goto error;
++
++      /* we've now applied the new policy, so restore the old policy state to free it */
++      polstate = &new_reload_state.oldpolicy;
++      current_alloc_state = &new_reload_state.oldalloc;
++      free_variables(true);
++
++      /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
++         to running_polstate/current_alloc_state inside stop_machine
++      */
++      err = 0;
++      goto out;
++error:
++      /* on error of loading the new policy, we'll just keep the previous
++         policy set around
++      */
++      free_variables(true);
++
++      /* doesn't affect runtime, but maintains consistent state */
++out:
++      polstate = new_reload_state.oldpolicy_ptr;
++      current_alloc_state = new_reload_state.oldalloc_ptr;
++
++      return err;
++}
++
++static int
++gracl_init(struct gr_arg *args)
++{
++      int error = 0;
++
++      memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
++      memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
++
++      if (init_variables(args, false)) {
++              gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++              error = -ENOMEM;
++              goto out;
++      }
++
++      error = copy_user_acl(args);
++      free_init_variables();
++      if (error)
++              goto out;
++
++      error = gr_set_acls(0);
++      if (error)
++              goto out;
++
++      gr_enable_rbac_system();
++
++      return 0;
++
++out:
++      free_variables(false);
++      return error;
++}
++
++static int
++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
++                       unsigned char **sum)
++{
++      struct acl_role_label *r;
++      struct role_allowed_ip *ipp;
++      struct role_transition *trans;
++      unsigned int i;
++      int found = 0;
++      u32 curr_ip = current->signal->curr_ip;
++
++      current->signal->saved_ip = curr_ip;
++
++      /* check transition table */
++
++      for (trans = current->role->transitions; trans; trans = trans->next) {
++              if (!strcmp(rolename, trans->rolename)) {
++                      found = 1;
++                      break;
++              }
++      }
++
++      if (!found)
++              return 0;
++
++      /* handle special roles that do not require authentication
++         and check ip */
++
++      FOR_EACH_ROLE_START(r)
++              if (!strcmp(rolename, r->rolename) &&
++                  (r->roletype & GR_ROLE_SPECIAL)) {
++                      found = 0;
++                      if (r->allowed_ips != NULL) {
++                              for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
++                                      if ((ntohl(curr_ip) & ipp->netmask) ==
++                                           (ntohl(ipp->addr) & ipp->netmask))
++                                              found = 1;
++                              }
++                      } else
++                              found = 2;
++                      if (!found)
++                              return 0;
++
++                      if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
++                          ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
++                              *salt = NULL;
++                              *sum = NULL;
++                              return 1;
++                      }
++              }
++      FOR_EACH_ROLE_END(r)
++
++      for (i = 0; i < polstate->num_sprole_pws; i++) {
++              if (!strcmp(rolename, (const char *)polstate->acl_special_roles[i]->rolename)) {
++                      *salt = polstate->acl_special_roles[i]->salt;
++                      *sum = polstate->acl_special_roles[i]->sum;
++                      return 1;
++              }
++      }
++
++      return 0;
++}
++
++int gr_check_secure_terminal(struct task_struct *task)
++{
++      struct task_struct *p, *p2, *p3;
++      struct files_struct *files;
++      struct fdtable *fdt;
++      struct file *our_file = NULL, *file;
++      struct inode *our_inode = NULL;
++      int i;
++
++      if (task->signal->tty == NULL)
++              return 1;
++
++      files = get_files_struct(task);
++      if (files != NULL) {
++              rcu_read_lock();
++              fdt = files_fdtable(files);
++              for (i=0; i < fdt->max_fds; i++) {
++                      file = fcheck_files(files, i);
++                      if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
++                              get_file(file);
++                              our_file = file;
++                      }
++              }
++              rcu_read_unlock();
++              put_files_struct(files);
++      }
++
++      if (our_file == NULL)
++              return 1;
++
++      our_inode = d_backing_inode(our_file->f_path.dentry);
++
++      read_lock(&tasklist_lock);
++      do_each_thread(p2, p) {
++              files = get_files_struct(p);
++              if (files == NULL ||
++                  (p->signal && p->signal->tty == task->signal->tty)) {
++                      if (files != NULL)
++                              put_files_struct(files);
++                      continue;
++              }
++              rcu_read_lock();
++              fdt = files_fdtable(files);
++              for (i=0; i < fdt->max_fds; i++) {
++                      struct inode *inode = NULL;
++                      file = fcheck_files(files, i);
++                      if (file)
++                              inode = d_backing_inode(file->f_path.dentry);
++                      if (inode && S_ISCHR(inode->i_mode) && inode->i_rdev == our_inode->i_rdev) {
++                              p3 = task;
++                              while (task_pid_nr(p3) > 0) {
++                                      if (p3 == p)
++                                              break;
++                                      p3 = p3->real_parent;
++                              }
++                              if (p3 == p)
++                                      break;
++                              gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
++                              gr_handle_alertkill(p);
++                              rcu_read_unlock();
++                              put_files_struct(files);
++                              read_unlock(&tasklist_lock);
++                              fput(our_file);
++                              return 0;
++                      }
++              }
++              rcu_read_unlock();
++              put_files_struct(files);
++      } while_each_thread(p2, p);
++      read_unlock(&tasklist_lock);
++
++      fput(our_file);
++      return 1;
++}
++
++ssize_t
++write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
++{
++      struct gr_arg_wrapper uwrap;
++      unsigned char *sprole_salt = NULL;
++      unsigned char *sprole_sum = NULL;
++      int error = 0;
++      int error2 = 0;
++      size_t req_count = 0;
++      unsigned char oldmode = 0;
++
++      mutex_lock(&gr_dev_mutex);
++
++      if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
++              error = -EPERM;
++              goto out;
++      }
++
++#ifdef CONFIG_COMPAT
++      pax_open_kernel();
++      if (in_compat_syscall()) {
++              copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
++              copy_gr_arg = &copy_gr_arg_compat;
++              copy_acl_object_label = &copy_acl_object_label_compat;
++              copy_acl_subject_label = &copy_acl_subject_label_compat;
++              copy_acl_role_label = &copy_acl_role_label_compat;
++              copy_acl_ip_label = &copy_acl_ip_label_compat;
++              copy_role_allowed_ip = &copy_role_allowed_ip_compat;
++              copy_role_transition = &copy_role_transition_compat;
++              copy_sprole_pw = &copy_sprole_pw_compat;
++              copy_gr_hash_struct = &copy_gr_hash_struct_compat;
++              copy_pointer_from_array = &copy_pointer_from_array_compat;
++              get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
++      } else {
++              copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
++              copy_gr_arg = &copy_gr_arg_normal;
++              copy_acl_object_label = &copy_acl_object_label_normal;
++              copy_acl_subject_label = &copy_acl_subject_label_normal;
++              copy_acl_role_label = &copy_acl_role_label_normal;
++              copy_acl_ip_label = &copy_acl_ip_label_normal;
++              copy_role_allowed_ip = &copy_role_allowed_ip_normal;
++              copy_role_transition = &copy_role_transition_normal;
++              copy_sprole_pw = &copy_sprole_pw_normal;
++              copy_gr_hash_struct = &copy_gr_hash_struct_normal;
++              copy_pointer_from_array = &copy_pointer_from_array_normal;
++              get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
++      }
++      pax_close_kernel();
++#endif
++
++      req_count = get_gr_arg_wrapper_size();
++
++      if (count != req_count) {
++              gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
++              error = -EINVAL;
++              goto out;
++      }
++
++      
++      if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
++              gr_auth_expires = 0;
++              gr_auth_attempts = 0;
++      }
++
++      error = copy_gr_arg_wrapper(buf, &uwrap);
++      if (error)
++              goto out;
++
++      error = copy_gr_arg(uwrap.arg, gr_usermode);
++      if (error)
++              goto out;
++
++      if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++          gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++          time_after(gr_auth_expires, get_seconds())) {
++              error = -EBUSY;
++              goto out;
++      }
++
++      /* if non-root trying to do anything other than use a special role,
++         do not attempt authentication, do not count towards authentication
++         locking
++       */
++
++      if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
++          gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++          gr_is_global_nonroot(current_uid())) {
++              error = -EPERM;
++              goto out;
++      }
++
++      /* ensure pw and special role name are null terminated */
++
++      gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++      gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++      /* Okay. 
++       * We have our enough of the argument structure..(we have yet
++       * to copy_from_user the tables themselves) . Copy the tables
++       * only if we need them, i.e. for loading operations. */
++
++      switch (gr_usermode->mode) {
++      case GR_STATUS:
++                      if (gr_acl_is_enabled()) {
++                              error = 1;
++                              if (!gr_check_secure_terminal(current))
++                                      error = 3;
++                      } else
++                              error = 2;
++                      goto out;
++      case GR_SHUTDOWN:
++              if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++                      stop_machine(gr_rbac_disable, NULL, NULL);
++                      free_variables(false);
++                      memset(gr_usermode, 0, sizeof(struct gr_arg));
++                      memset(gr_system_salt, 0, GR_SALT_LEN);
++                      memset(gr_system_sum, 0, GR_SHA_LEN);
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
++              } else if (gr_acl_is_enabled()) {
++                      gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
++                      error = -EPERM;
++              } else {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
++                      error = -EAGAIN;
++              }
++              break;
++      case GR_ENABLE:
++              if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
++                      gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
++              else {
++                      if (gr_acl_is_enabled())
++                              error = -EAGAIN;
++                      else
++                              error = error2;
++                      gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
++              }
++              break;
++      case GR_OLDRELOAD:
++              oldmode = 1;
++      case GR_RELOAD:
++              if (!gr_acl_is_enabled()) {
++                      gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
++                      error = -EAGAIN;
++              } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++                      error2 = gracl_reload(gr_usermode, oldmode);
++                      if (!error2)
++                              gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
++                      else {
++                              gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++                              error = error2;
++                      }
++              } else {
++                      gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++                      error = -EPERM;
++              }
++              break;
++      case GR_SEGVMOD:
++              if (unlikely(!gr_acl_is_enabled())) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
++                      error = -EAGAIN;
++                      break;
++              }
++
++              if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
++                      if (gr_usermode->segv_device && gr_usermode->segv_inode) {
++                              struct acl_subject_label *segvacl;
++                              segvacl =
++                                  lookup_acl_subj_label(gr_usermode->segv_inode,
++                                                        gr_usermode->segv_device,
++                                                        current->role);
++                              if (segvacl) {
++                                      segvacl->crashes = 0;
++                                      segvacl->expires = 0;
++                              }
++                      } else
++                              gr_find_and_remove_uid(gr_usermode->segv_uid);
++              } else {
++                      gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
++                      error = -EPERM;
++              }
++              break;
++      case GR_SPROLE:
++      case GR_SPROLEPAM:
++              if (unlikely(!gr_acl_is_enabled())) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
++                      error = -EAGAIN;
++                      break;
++              }
++
++              if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
++                      current->role->expires = 0;
++                      current->role->auth_attempts = 0;
++              }
++
++              if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++                  time_after(current->role->expires, get_seconds())) {
++                      error = -EBUSY;
++                      goto out;
++              }
++
++              if (lookup_special_role_auth
++                  (gr_usermode->mode, (const char *)gr_usermode->sp_role, &sprole_salt, &sprole_sum)
++                  && ((!sprole_salt && !sprole_sum)
++                      || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++                      char *p = "";
++                      assign_special_role((const char *)gr_usermode->sp_role);
++                      read_lock(&tasklist_lock);
++                      if (current->real_parent)
++                              p = current->real_parent->role->rolename;
++                      read_unlock(&tasklist_lock);
++                      gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
++                                      p, acl_sp_role_value);
++              } else {
++                      gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
++                      error = -EPERM;
++                      if(!(current->role->auth_attempts++))
++                              current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++                      goto out;
++              }
++              break;
++      case GR_UNSPROLE:
++              if (unlikely(!gr_acl_is_enabled())) {
++                      gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
++                      error = -EAGAIN;
++                      break;
++              }
++
++              if (current->role->roletype & GR_ROLE_SPECIAL) {
++                      char *p = "";
++                      int i = 0;
++
++                      read_lock(&tasklist_lock);
++                      if (current->real_parent) {
++                              p = current->real_parent->role->rolename;
++                              i = current->real_parent->acl_role_id;
++                      }
++                      read_unlock(&tasklist_lock);
++
++                      gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
++                      gr_set_acls(1);
++              } else {
++                      error = -EPERM;
++                      goto out;
++              }
++              break;
++      default:
++              gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
++              error = -EINVAL;
++              break;
++      }
++
++      if (error != -EPERM)
++              goto out;
++
++      if(!(gr_auth_attempts++))
++              gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++      out:
++      mutex_unlock(&gr_dev_mutex);
++
++      if (!error)
++              error = req_count;
++
++      return error;
++}
++
++int
++gr_set_acls(const int type)
++{
++      struct task_struct *task, *task2;
++      struct acl_role_label *role = current->role;
++      struct acl_subject_label *subj;
++      __u16 acl_role_id = current->acl_role_id;
++      const struct cred *cred;
++      int ret;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++      do_each_thread(task2, task) {
++              /* check to see if we're called from the exit handler,
++                 if so, only replace ACLs that have inherited the admin
++                 ACL */
++
++              if (type && (task->role != role ||
++                           task->acl_role_id != acl_role_id))
++                      continue;
++
++              task->acl_role_id = 0;
++              task->acl_sp_role = 0;
++              task->inherited = 0;
++
++              if (task->exec_file) {
++                      cred = __task_cred(task);
++                      task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
++                      subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
++                      if (subj == NULL) {
++                              ret = -EINVAL;
++                              read_unlock(&grsec_exec_file_lock);
++                              read_unlock(&tasklist_lock);
++                              rcu_read_unlock();
++                              gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
++                              return ret;
++                      }
++                      __gr_apply_subject_to_task(polstate, task, subj);
++              } else {
++                      // it's a kernel process
++                      task->role = polstate->kernel_role;
++                      task->acl = polstate->kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++                      task->acl->mode &= ~GR_PROCFIND;
++#endif
++              }
++      } while_each_thread(task2, task);
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return 0;
++}
+diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
+new file mode 100644
+index 0000000..dfba8fd
+--- /dev/null
++++ b/grsecurity/gracl_res.c
+@@ -0,0 +1,74 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
++static const char *restab_log[] = {
++      [RLIMIT_CPU] = "RLIMIT_CPU",
++      [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
++      [RLIMIT_DATA] = "RLIMIT_DATA",
++      [RLIMIT_STACK] = "RLIMIT_STACK",
++      [RLIMIT_CORE] = "RLIMIT_CORE",
++      [RLIMIT_RSS] = "RLIMIT_RSS",
++      [RLIMIT_NPROC] = "RLIMIT_NPROC",
++      [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
++      [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
++      [RLIMIT_AS] = "RLIMIT_AS",
++      [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
++      [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
++      [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
++      [RLIMIT_NICE] = "RLIMIT_NICE",
++      [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
++      [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
++      [GR_CRASH_RES] = "RLIMIT_CRASH"
++};
++
++void
++gr_log_resource(const struct task_struct *task,
++              const int res, const unsigned long wanted, const int gt)
++{
++      const struct cred *cred;
++      unsigned long rlim;
++
++      if (!gr_acl_is_enabled() && !grsec_resource_logging)
++              return;
++
++      // not yet supported resource
++      if (unlikely(!restab_log[res]))
++              return;
++
++      /*
++       * not really security relevant, too much userland code shared
++       * from pulseaudio that blindly attempts to violate limits in a loop,
++       * resulting in log spam
++       */
++      if (res == RLIMIT_NICE)
++              return;
++
++      if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
++              rlim = task_rlimit_max(task, res);
++      else
++              rlim = task_rlimit(task, res);
++
++      if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
++              return;
++
++      rcu_read_lock();
++      cred = __task_cred(task);
++
++      if (res == RLIMIT_NPROC && 
++          (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || 
++           cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
++              goto out_rcu_unlock;
++      else if (res == RLIMIT_MEMLOCK &&
++               cap_raised(cred->cap_effective, CAP_IPC_LOCK))
++              goto out_rcu_unlock;
++      rcu_read_unlock();
++
++      gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
++
++      return;
++out_rcu_unlock:
++      rcu_read_unlock();
++      return;
++}
+diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
+new file mode 100644
+index 0000000..02c5a2b
+--- /dev/null
++++ b/grsecurity/gracl_segv.c
+@@ -0,0 +1,306 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++#include <linux/magic.h>
++#include <linux/pagemap.h>
++#include "../fs/btrfs/async-thread.h"
++#include "../fs/btrfs/ctree.h"
++#include "../fs/btrfs/btrfs_inode.h"
++#endif
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static DEFINE_SPINLOCK(gr_uid_lock);
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++      lookup_acl_subj_label(const u64 inode, const dev_t dev,
++                            const struct acl_role_label *role);
++
++int
++gr_init_uidset(void)
++{
++      uid_set =
++          kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++      uid_used = 0;
++
++      return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++      if (uid_set) {
++              struct crash_uid *tmpset;
++              spin_lock(&gr_uid_lock);
++              tmpset = uid_set;
++              uid_set = NULL;
++              uid_used = 0;
++              spin_unlock(&gr_uid_lock);
++              if (tmpset)
++                      kfree(tmpset);
++      }
++
++      return;
++}
++
++int
++gr_find_uid(const uid_t uid)
++{
++      struct crash_uid *tmp = uid_set;
++      uid_t buid;
++      int low = 0, high = uid_used - 1, mid;
++
++      while (high >= low) {
++              mid = (low + high) >> 1;
++              buid = tmp[mid].uid;
++              if (buid == uid)
++                      return mid;
++              if (buid > uid)
++                      high = mid - 1;
++              if (buid < uid)
++                      low = mid + 1;
++      }
++
++      return -1;
++}
++
++static void
++gr_insertsort(void)
++{
++      unsigned short i, j;
++      struct crash_uid index;
++
++      for (i = 1; i < uid_used; i++) {
++              index = uid_set[i];
++              j = i;
++              while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++                      uid_set[j] = uid_set[j - 1];
++                      j--;
++              }
++              uid_set[j] = index;
++      }
++
++      return;
++}
++
++static void
++gr_insert_uid(const kuid_t kuid, const unsigned long expires)
++{
++      int loc;
++      uid_t uid = GR_GLOBAL_UID(kuid);
++
++      if (uid_used == GR_UIDTABLE_MAX)
++              return;
++
++      loc = gr_find_uid(uid);
++
++      if (loc >= 0) {
++              uid_set[loc].expires = expires;
++              return;
++      }
++
++      uid_set[uid_used].uid = uid;
++      uid_set[uid_used].expires = expires;
++      uid_used++;
++
++      gr_insertsort();
++
++      return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++      unsigned short i;
++
++      for (i = loc + 1; i < uid_used; i++)
++              uid_set[i - 1] = uid_set[i];
++
++      uid_used--;
++
++      return;
++}
++
++int gr_find_and_remove_uid(uid_t uid)
++{
++      int loc;
++
++      spin_lock(&gr_uid_lock);
++      loc = gr_find_uid(uid);
++      if (loc >= 0)
++              gr_remove_uid(loc);
++      spin_unlock(&gr_uid_lock);
++
++      return loc >= 0 ? 1 : 0;
++}
++
++int
++gr_check_crash_uid(const kuid_t kuid)
++{
++      int loc;
++      int ret = 0;
++      uid_t uid;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      uid = GR_GLOBAL_UID(kuid);
++
++      spin_lock(&gr_uid_lock);
++      loc = gr_find_uid(uid);
++
++      if (loc < 0)
++              goto out_unlock;
++
++      if (time_before_eq(uid_set[loc].expires, get_seconds()))
++              gr_remove_uid(loc);
++      else
++              ret = 1;
++
++out_unlock:
++      spin_unlock(&gr_uid_lock);
++      return ret;
++}
++
++extern int gr_fake_force_sig(int sig, struct task_struct *t);
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++      struct acl_subject_label *curr;
++      struct task_struct *tsk, *tsk2;
++      const struct cred *cred;
++      const struct cred *cred2;
++
++      if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++              return;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return;
++
++      curr = task->acl;
++
++      if (!(curr->resmask & (1U << GR_CRASH_RES)))
++              return;
++
++      if (time_before_eq(curr->expires, get_seconds())) {
++              curr->expires = 0;
++              curr->crashes = 0;
++      }
++
++      curr->crashes++;
++
++      if (!curr->expires)
++              curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++      if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++          time_after(curr->expires, get_seconds())) {
++              int is_priv = is_privileged_binary(task->mm->exe_file->f_path.dentry);
++
++              rcu_read_lock();
++              cred = __task_cred(task);
++              if (gr_is_global_nonroot(cred->uid) && is_priv) {
++                      gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++                      spin_lock(&gr_uid_lock);
++                      gr_insert_uid(cred->uid, curr->expires);
++                      spin_unlock(&gr_uid_lock);
++                      curr->expires = 0;
++                      curr->crashes = 0;
++                      read_lock(&tasklist_lock);
++                      do_each_thread(tsk2, tsk) {
++                              cred2 = __task_cred(tsk);
++                              if (tsk != task && uid_eq(cred2->uid, cred->uid))
++                                      gr_fake_force_sig(SIGKILL, tsk);
++                      } while_each_thread(tsk2, tsk);
++                      read_unlock(&tasklist_lock);
++              } else {
++                      gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++                      read_lock(&tasklist_lock);
++                      read_lock(&grsec_exec_file_lock);
++                      do_each_thread(tsk2, tsk) {
++                              if (likely(tsk != task)) {
++                                      // if this thread has the same subject as the one that triggered
++                                      // RES_CRASH and it's the same binary, kill it
++                                      if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
++                                              gr_fake_force_sig(SIGKILL, tsk);
++                              }
++                      } while_each_thread(tsk2, tsk);
++                      read_unlock(&grsec_exec_file_lock);
++                      read_unlock(&tasklist_lock);
++              }
++              rcu_read_unlock();
++      }
++
++      return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++      struct acl_subject_label *curr;
++      struct dentry *dentry;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return 0;
++
++      read_lock(&gr_inode_lock);
++      dentry = filp->f_path.dentry;
++      curr = lookup_acl_subj_label(gr_get_ino_from_dentry(dentry), gr_get_dev_from_dentry(dentry),
++                                   current->role);
++      read_unlock(&gr_inode_lock);
++
++      if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
++          (!curr->crashes && !curr->expires))
++              return 0;
++
++      if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++          time_after(curr->expires, get_seconds()))
++              return 1;
++      else if (time_before_eq(curr->expires, get_seconds())) {
++              curr->crashes = 0;
++              curr->expires = 0;
++      }
++
++      return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++      struct acl_subject_label *curracl;
++      __u32 curr_ip;
++      struct task_struct *p, *p2;
++
++      if (unlikely(!gr_acl_is_enabled()))
++              return;
++
++      curracl = task->acl;
++      curr_ip = task->signal->curr_ip;
++
++      if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
++              read_lock(&tasklist_lock);
++              do_each_thread(p2, p) {
++                      if (p->signal->curr_ip == curr_ip)
++                              gr_fake_force_sig(SIGKILL, p);
++              } while_each_thread(p2, p);
++              read_unlock(&tasklist_lock);
++      } else if (curracl->mode & GR_KILLPROC)
++              gr_fake_force_sig(SIGKILL, task);
++
++      return;
++}
+diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
+new file mode 100644
+index 0000000..6b0c9cc
+--- /dev/null
++++ b/grsecurity/gracl_shm.c
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++              const u64 shm_createtime, const kuid_t cuid, const int shmid)
++{
++      struct task_struct *task;
++
++      if (!gr_acl_is_enabled())
++              return 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++
++      task = find_task_by_vpid(shm_cprid);
++
++      if (unlikely(!task))
++              task = find_task_by_vpid(shm_lapid);
++
++      if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
++                            (task_pid_nr(task) == shm_lapid)) &&
++                   (task->acl->mode & GR_PROTSHM) &&
++                   (task->acl != current->acl))) {
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++              gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
++              return 0;
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return 1;
++}
+diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
+new file mode 100644
+index 0000000..bc0be01
+--- /dev/null
++++ b/grsecurity/grsec_chdir.c
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++      if ((grsec_enable_chdir && grsec_enable_group &&
++           in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++                                            !grsec_enable_group)) {
++              gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
++      }
++#endif
++      return;
++}
+diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
+new file mode 100644
+index 0000000..1964ab1c
+--- /dev/null
++++ b/grsecurity/grsec_chroot.c
+@@ -0,0 +1,506 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include <linux/namei.h>
++#include "../fs/mount.h"
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++int gr_init_ran;
++#endif
++
++void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      struct dentry *tmpd = dentry;
++
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++
++      while (tmpd != mnt->mnt_root) {
++              atomic_inc(&tmpd->chroot_refcnt);
++              tmpd = tmpd->d_parent;
++      }
++      atomic_inc(&tmpd->chroot_refcnt);
++
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++#endif
++}
++
++void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      struct dentry *tmpd = dentry;
++
++      read_seqlock_excl(&mount_lock);
++      write_seqlock(&rename_lock);
++
++      while (tmpd != mnt->mnt_root) {
++              atomic_dec(&tmpd->chroot_refcnt);
++              tmpd = tmpd->d_parent;
++      }
++      atomic_dec(&tmpd->chroot_refcnt);
++
++      write_sequnlock(&rename_lock);
++      read_sequnlock_excl(&mount_lock);
++#endif
++}
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++static struct dentry *get_closest_chroot(struct dentry *dentry)
++{
++      write_seqlock(&rename_lock);
++      do {
++              if (atomic_read(&dentry->chroot_refcnt)) {
++                      write_sequnlock(&rename_lock);
++                      return dentry;
++              }
++              dentry = dentry->d_parent;
++      } while (!IS_ROOT(dentry));
++      write_sequnlock(&rename_lock);
++      return NULL;
++}
++#endif
++
++int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
++                       struct dentry *newdentry, struct vfsmount *newmnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      struct dentry *chroot;
++
++      if (unlikely(!grsec_enable_chroot_rename))
++              return 0;
++
++      if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
++              return 0;
++
++      chroot = get_closest_chroot(olddentry);
++
++      if (chroot == NULL)
++              return 0;
++
++      if (is_subdir(newdentry, chroot))
++              return 0;
++
++      gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
++
++      return 1;
++#else
++      return 0;
++#endif
++}
++
++void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
++                           path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++                           && gr_init_ran
++#endif
++         )
++              task->gr_is_chrooted = 1;
++      else {
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++              if (task_pid_nr(task) == 1 && !gr_init_ran)
++                      gr_init_ran = 1;
++#endif
++              task->gr_is_chrooted = 0;
++      }
++
++      task->gr_chroot_dentry = path->dentry;
++#endif
++      return;
++}
++
++void gr_clear_chroot_entries(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++      task->gr_is_chrooted = 0;
++      task->gr_chroot_dentry = NULL;
++#endif
++      return;
++}     
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++      struct task_struct *p;
++
++      if (unlikely(!grsec_enable_chroot_unix))
++              return 1;
++
++      if (likely(!proc_is_chrooted(current)))
++              return 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      p = find_task_by_vpid_unrestricted(pid);
++      if (unlikely(p && !have_same_root(current, p))) {
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++              gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
++              return 0;
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++#endif
++      return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      if (grsec_enable_chroot_nice && (niceval < task_nice(p))
++                      && proc_is_chrooted(current)) {
++              gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      struct task_struct *p;
++      int ret = 0;
++      if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
++              return ret;
++
++      read_lock(&tasklist_lock);
++      do_each_pid_task(pid, type, p) {
++              if (!have_same_root(current, p)) {
++                      ret = 1;
++                      goto out;
++              }
++      } while_each_pid_task(pid, type, p);
++out:
++      read_unlock(&tasklist_lock);
++      return ret;
++#endif
++      return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
++              return 0;
++
++      if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
++          !have_same_root(current, p)) {
++              return 1;
++      }
++#endif
++      return 0;
++}
++
++EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++      struct path path, currentroot;
++      int ret = 0;
++
++      path.dentry = (struct dentry *)u_dentry;
++      path.mnt = (struct vfsmount *)u_mnt;
++      get_fs_root(current->fs, &currentroot);
++      if (path_is_under(&path, &currentroot))
++              ret = 1;
++      path_put(&currentroot);
++
++      return ret;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      if (!grsec_enable_chroot_fchdir)
++              return 1;
++
++      if (!proc_is_chrooted(current))
++              return 1;
++      else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
++              return 0;
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_pathat(int dfd, struct dentry *u_dentry, struct vfsmount *u_mnt, unsigned flags)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      struct fd f;
++      struct path fd_path;
++      struct path file_path;
++
++      if (!grsec_enable_chroot_fchdir)
++              return 0;
++
++      if (!proc_is_chrooted(current) || dfd == -1 || dfd == AT_FDCWD)
++              return 0;
++
++      if (flags & LOOKUP_RCU)
++              return -ECHILD;
++
++      f = fdget_raw(dfd);
++      if (!f.file)
++              return 0;
++
++      fd_path = f.file->f_path;
++      path_get(&fd_path);
++      fdput(f);
++
++      file_path.dentry = u_dentry;
++      file_path.mnt = u_mnt;
++
++      if (!gr_is_outside_chroot(u_dentry, u_mnt) && !path_is_under(&file_path, &fd_path)) {
++              path_put(&fd_path);
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_PATHAT_MSG, u_dentry, u_mnt);
++              return -ENOENT;
++      }
++      path_put(&fd_path);
++#endif
++      return 0;
++}
++
++int
++gr_chroot_fhandle(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      if (!grsec_enable_chroot_fchdir)
++              return 1;
++
++      if (!proc_is_chrooted(current))
++              return 1;
++      else {
++              gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
++              return 0;
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++              const u64 shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++      struct task_struct *p;
++
++      if (unlikely(!grsec_enable_chroot_shmat))
++              return 1;
++
++      if (likely(!proc_is_chrooted(current)))
++              return 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++
++      if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
++              if (time_before_eq64(p->start_time, shm_createtime)) {
++                      if (have_same_root(current, p)) {
++                              goto allow;
++                      } else {
++                              read_unlock(&tasklist_lock);
++                              rcu_read_unlock();
++                              gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++                              return 0;
++                      }
++              }
++              /* creator exited, pid reuse, fall through to next check */
++      }
++      if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
++              if (unlikely(!have_same_root(current, p))) {
++                      read_unlock(&tasklist_lock);
++                      rcu_read_unlock();
++                      gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++                      return 0;
++              }
++      }
++
++allow:
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++#endif
++      return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++      if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++              gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
++#endif
++      return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++                     const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++      if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && 
++          proc_is_chrooted(current)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++                     const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++      if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++              gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++      if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++      if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++          !gr_is_outside_chroot(dentry, mnt)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int
++gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
++              kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++              if (cap_raised(chroot_caps, cap)) {
++                      if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
++                              gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
++                      }
++                      return 0;
++              }
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      return gr_task_chroot_is_capable(current, current_cred(), cap);
++#endif
++      return 1;
++}
++
++int
++gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
++              kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++              if (cap_raised(chroot_caps, cap)) {
++                      return 0;
++              }
++      }
++#endif
++      return 1;
++}
++
++int
++gr_chroot_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      return gr_task_chroot_is_capable_nolog(current, cap);
++#endif
++      return 1;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++      if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
++          proc_is_chrooted(current))
++              return -EACCES;
++#endif
++      return 0;
++}
++
++void
++gr_handle_chroot_chdir(const struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++      if (grsec_enable_chroot_chdir)
++              set_fs_pwd(current->fs, path);
++#endif
++      return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++                     const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++      /* allow chmod +s on directories, but not files */
++      if (grsec_enable_chroot_chmod && !d_is_dir(dentry) &&
++          ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++          proc_is_chrooted(current)) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
+new file mode 100644
+index 0000000..ba8d997
+--- /dev/null
++++ b/grsecurity/grsec_disabled.c
+@@ -0,0 +1,445 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++      return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++      return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++      return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++      return 0;
++}
++
++int
++gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap, bool log)
++{
++      return 0;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) 
++{
++      return;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++      return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++      return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++      return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++      return 0;
++}
++
++int
++gr_set_acls(const int type)
++{
++      return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++      return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++      return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++      return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++      return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++      return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++                const int unsafe_share)
++{
++      return 0;
++}
++
++void
++gr_handle_delete(const u64 ino, const dev_t dev)
++{
++      return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++      return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++      return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++      return 0;
++}
++
++int
++gr_check_crash_uid(const kuid_t uid)
++{
++      return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++               struct dentry *old_dentry,
++               struct dentry *new_dentry,
++               struct vfsmount *mnt, const __u8 replace, unsigned int flags)
++{
++      return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++      return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++                    const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++      return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++                        const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++                 int acc_mode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++                 unsigned int *vm_flags)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++                     const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++                   const struct vfsmount * mnt, const int fmode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++                  umode_t *mode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++void
++grsecurity_init(void)
++{
++      return;
++}
++
++umode_t gr_acl_umask(void)
++{
++      return 0;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++                  const struct dentry * parent_dentry,
++                  const struct vfsmount * parent_mnt,
++                  const int mode)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++                  const struct dentry * parent_dentry,
++                  const struct vfsmount * parent_mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++                    const struct dentry * parent_dentry,
++                    const struct vfsmount * parent_mnt, const struct filename *from)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++                 const struct dentry * parent_dentry,
++                 const struct vfsmount * parent_mnt,
++                 const struct dentry * old_dentry,
++                 const struct vfsmount * old_mnt, const struct filename *to)
++{
++      return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++                   const struct dentry *parent_dentry,
++                   const struct vfsmount *parent_mnt,
++                   const struct dentry *old_dentry,
++                   const struct inode *old_parent_inode,
++                   const struct vfsmount *old_mnt, const struct filename *newname,
++                   unsigned int flags)
++{
++      return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++                    const int namelen, const u64 ino)
++{
++      return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++              const u64 shm_createtime, const kuid_t cuid, const int shmid)
++{
++      return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++      return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++      return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++      return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++                  const struct dentry * p_dentry,
++                  const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++                  const int imode)
++{
++      return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++      return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++      return 1;
++}
++
++void
++gr_set_role_label(const kuid_t uid, const kgid_t gid)
++{
++      return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++      return 0;
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++      return 0;
++}
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++      return 0;
++}
++
++int
++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
++{
++      return 0;
++}
++
++int
++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
++{
++      return 0;
++}
++
++int gr_acl_enable_at_secure(void)
++{
++      return 0;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++      return d_backing_inode(dentry)->i_sb->s_dev;
++}
++
++u64 gr_get_ino_from_dentry(struct dentry *dentry)
++{
++      return d_backing_inode(dentry)->i_ino;
++}
++
++void gr_put_exec_file(struct task_struct *task)
++{
++      return;
++}
++
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL_GPL(gr_check_user_change);
++EXPORT_SYMBOL_GPL(gr_check_group_change);
++#endif
+diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
+new file mode 100644
+index 0000000..808006e
+--- /dev/null
++++ b/grsecurity/grsec_exec.c
+@@ -0,0 +1,188 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++#include <linux/module.h>
++#include <linux/compat.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DEFINE_MUTEX(gr_exec_arg_mutex);
++#endif
++
++struct user_arg_ptr {
++#ifdef CONFIG_COMPAT
++      bool is_compat;
++#endif
++      union {
++              const char __user *const __user *native;
++#ifdef CONFIG_COMPAT
++              const compat_uptr_t __user *compat;
++#endif
++      } ptr;
++};
++
++extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++      char *grarg = gr_exec_arg_buf;
++      unsigned int i, x, execlen = 0;
++      char c;
++
++      if (!((grsec_enable_execlog && grsec_enable_group &&
++             in_group_p(grsec_audit_gid))
++            || (grsec_enable_execlog && !grsec_enable_group)))
++              return;
++
++      mutex_lock(&gr_exec_arg_mutex);
++      memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++      for (i = 0; i < bprm->argc && execlen < 128; i++) {
++              const char __user *p;
++              unsigned int len;
++
++              p = get_user_arg_ptr(argv, i);
++              if (IS_ERR(p))
++                      goto log;
++
++              len = strnlen_user(p, 128 - execlen);
++              if (len > 128 - execlen)
++                      len = 128 - execlen;
++              else if (len > 0)
++                      len--;
++              if (copy_from_user(grarg + execlen, p, len))
++                      goto log;
++
++              /* rewrite unprintable characters */
++              for (x = 0; x < len; x++) {
++                      c = *(grarg + execlen + x);
++                      if (c < 32 || c > 126)
++                              *(grarg + execlen + x) = ' ';
++              }
++
++              execlen += len;
++              *(grarg + execlen) = ' ';
++              *(grarg + execlen + 1) = '\0';
++              execlen++;
++      }
++
++      log:
++      gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++                      bprm->file->f_path.mnt, grarg);
++      mutex_unlock(&gr_exec_arg_mutex);
++#endif
++      return;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int gr_acl_is_capable(const int cap);
++extern int gr_acl_is_capable_nolog(const int cap);
++extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap, bool log);
++extern int gr_chroot_is_capable(const int cap);
++extern int gr_chroot_is_capable_nolog(const int cap);
++extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
++extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
++#endif
++
++const char *captab_log[] = {
++      "CAP_CHOWN",
++      "CAP_DAC_OVERRIDE",
++      "CAP_DAC_READ_SEARCH",
++      "CAP_FOWNER",
++      "CAP_FSETID",
++      "CAP_KILL",
++      "CAP_SETGID",
++      "CAP_SETUID",
++      "CAP_SETPCAP",
++      "CAP_LINUX_IMMUTABLE",
++      "CAP_NET_BIND_SERVICE",
++      "CAP_NET_BROADCAST",
++      "CAP_NET_ADMIN",
++      "CAP_NET_RAW",
++      "CAP_IPC_LOCK",
++      "CAP_IPC_OWNER",
++      "CAP_SYS_MODULE",
++      "CAP_SYS_RAWIO",
++      "CAP_SYS_CHROOT",
++      "CAP_SYS_PTRACE",
++      "CAP_SYS_PACCT",
++      "CAP_SYS_ADMIN",
++      "CAP_SYS_BOOT",
++      "CAP_SYS_NICE",
++      "CAP_SYS_RESOURCE",
++      "CAP_SYS_TIME",
++      "CAP_SYS_TTY_CONFIG",
++      "CAP_MKNOD",
++      "CAP_LEASE",
++      "CAP_AUDIT_WRITE",
++      "CAP_AUDIT_CONTROL",
++      "CAP_SETFCAP",
++      "CAP_MAC_OVERRIDE",
++      "CAP_MAC_ADMIN",
++      "CAP_SYSLOG",
++      "CAP_WAKE_ALARM",
++      "CAP_BLOCK_SUSPEND",
++      "CAP_AUDIT_READ"
++};
++
++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
++
++int gr_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_task_acl_is_capable(task, cred, cap, true) && gr_task_chroot_is_capable(task, cred, cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++int gr_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++int gr_task_is_capable_nolog(const struct task_struct *task, const struct cred *cred, const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++      if (gr_task_acl_is_capable(task, cred, cap, false) && gr_task_chroot_is_capable_nolog(task, cap))
++              return 1;
++      return 0;
++#else
++      return 1;
++#endif
++}
++
++EXPORT_SYMBOL_GPL(gr_is_capable);
++EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
++EXPORT_SYMBOL_GPL(gr_task_is_capable);
++EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
+diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
+new file mode 100644
+index 0000000..cdec49b
+--- /dev/null
++++ b/grsecurity/grsec_fifo.c
+@@ -0,0 +1,26 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++             const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++      const struct cred *cred = current_cred();
++      struct inode *inode = d_backing_inode(dentry);
++      struct inode *dir_inode = d_backing_inode(dir);
++
++      if (grsec_enable_fifo && S_ISFIFO(inode->i_mode) &&
++          !(flag & O_EXCL) && (dir_inode->i_mode & S_ISVTX) &&
++          !uid_eq(inode->i_uid, dir_inode->i_uid) &&
++          !uid_eq(cred->fsuid, inode->i_uid)) {
++              if (!inode_permission(inode, acc_mode))
++                      gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(inode->i_uid), GR_GLOBAL_GID(inode->i_gid));
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
+new file mode 100644
+index 0000000..8ca18bf
+--- /dev/null
++++ b/grsecurity/grsec_fork.c
+@@ -0,0 +1,23 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++      if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
++              switch (retval) {
++                      case -EAGAIN:
++                              gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
++                              break;
++                      case -ENOMEM:
++                              gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
++                              break;
++              }
++      }
++#endif
++      return;
++}
+diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
+new file mode 100644
+index 0000000..6822208
+--- /dev/null
++++ b/grsecurity/grsec_init.c
+@@ -0,0 +1,294 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++int grsec_enable_ptrace_readexec __read_only;
++int grsec_enable_setxid __read_only;
++int grsec_enable_symlinkown __read_only;
++kgid_t grsec_symlinkown_gid __read_only;
++int grsec_enable_brute __read_only;
++int grsec_enable_link __read_only;
++int grsec_enable_dmesg __read_only;
++int grsec_enable_harden_ptrace __read_only;
++int grsec_enable_harden_ipc __read_only;
++int grsec_enable_fifo __read_only;
++int grsec_enable_execlog __read_only;
++int grsec_enable_signal __read_only;
++int grsec_enable_forkfail __read_only;
++int grsec_enable_audit_ptrace __read_only;
++int grsec_enable_time __read_only;
++int grsec_enable_group __read_only;
++kgid_t grsec_audit_gid __read_only;
++int grsec_enable_chdir __read_only;
++int grsec_enable_mount __read_only;
++int grsec_enable_rofs __read_only;
++int grsec_deny_new_usb __read_only;
++int grsec_enable_chroot_findtask __read_only;
++int grsec_enable_chroot_mount __read_only;
++int grsec_enable_chroot_shmat __read_only;
++int grsec_enable_chroot_fchdir __read_only;
++int grsec_enable_chroot_double __read_only;
++int grsec_enable_chroot_pivot __read_only;
++int grsec_enable_chroot_chdir __read_only;
++int grsec_enable_chroot_chmod __read_only;
++int grsec_enable_chroot_mknod __read_only;
++int grsec_enable_chroot_nice __read_only;
++int grsec_enable_chroot_execlog __read_only;
++int grsec_enable_chroot_caps __read_only;
++int grsec_enable_chroot_rename __read_only;
++int grsec_enable_chroot_sysctl __read_only;
++int grsec_enable_chroot_unix __read_only;
++int grsec_enable_tpe __read_only;
++kgid_t grsec_tpe_gid __read_only;
++int grsec_enable_blackhole __read_only;
++#ifdef CONFIG_IPV6_MODULE
++EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
++#endif
++int grsec_lastack_retries __read_only;
++int grsec_enable_tpe_all __read_only;
++int grsec_enable_tpe_invert __read_only;
++int grsec_enable_socket_all __read_only;
++kgid_t grsec_socket_all_gid __read_only;
++int grsec_enable_socket_client __read_only;
++kgid_t grsec_socket_client_gid __read_only;
++int grsec_enable_socket_server __read_only;
++kgid_t grsec_socket_server_gid __read_only;
++int grsec_resource_logging __read_only;
++int grsec_disable_privio __read_only;
++int grsec_enable_log_rwxmaps __read_only;
++int grsec_enable_harden_tty __read_only;
++int grsec_lock __read_only;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++
++void __init
++grsecurity_init(void)
++{
++      int j;
++      /* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
++      memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++      for (j = 0; j < 4; j++) {
++              gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++              if (gr_shared_page[j] == NULL) {
++                      panic("Unable to allocate grsecurity shared page");
++                      return;
++              }
++      }
++
++      /* allocate log buffers */
++      gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++      if (!gr_alert_log_fmt) {
++              panic("Unable to allocate grsecurity alert log format buffer");
++              return;
++      }
++      gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++      if (!gr_audit_log_fmt) {
++              panic("Unable to allocate grsecurity audit log format buffer");
++              return;
++      }
++      gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++      if (!gr_alert_log_buf) {
++              panic("Unable to allocate grsecurity alert log buffer");
++              return;
++      }
++      gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++      if (!gr_audit_log_buf) {
++              panic("Unable to allocate grsecurity audit log buffer");
++              return;
++      }
++
++      /* allocate memory for authentication structure */
++      gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++      gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++      gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++      if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++              panic("Unable to allocate grsecurity authentication structure");
++              return;
++      }
++
++#ifdef CONFIG_GRKERNSEC_IO
++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
++      grsec_disable_privio = 1;
++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++      grsec_disable_privio = 1;
++#else
++      grsec_disable_privio = 0;
++#endif
++#endif
++
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++      /* for backward compatibility, tpe_invert always defaults to on if
++         enabled in the kernel
++      */
++      grsec_enable_tpe_invert = 1;
++#endif
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++      grsec_lock = 1;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      grsec_enable_log_rwxmaps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++      grsec_enable_group = 1;
++      grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++      grsec_enable_ptrace_readexec = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++      grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
++      grsec_enable_harden_ipc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_TTY
++      grsec_enable_harden_tty = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++      grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      grsec_enable_brute = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++      grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      grsec_enable_blackhole = 1;
++      grsec_lastack_retries = 4;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++      grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++      grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++      grsec_enable_setxid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++      grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++      grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++      grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++      grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++      grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++      grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++      grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++      grsec_enable_audit_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++      grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++      grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++      grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++      grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++      grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++      grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      grsec_enable_chroot_rename = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++      grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      grsec_enable_symlinkown = 1;
++      grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++      grsec_enable_tpe = 1;
++      grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++      grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++      grsec_enable_socket_all = 1;
++      grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++      grsec_enable_socket_client = 1;
++      grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      grsec_enable_socket_server = 1;
++      grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
++      grsec_deny_new_usb = 1;
++#endif
++
++      return;
++}
+diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
+new file mode 100644
+index 0000000..6a8ed69
+--- /dev/null
++++ b/grsecurity/grsec_ipc.c
+@@ -0,0 +1,48 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/ipc_namespace.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
++{
++#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
++      int write;
++      int orig_granted_mode;
++      kuid_t euid;
++      kgid_t egid;
++
++      if (!grsec_enable_harden_ipc)
++              return 1;
++
++      euid = current_euid();
++      egid = current_egid();
++
++      write = requested_mode & 00002;
++      orig_granted_mode = ipcp->mode;
++
++      if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
++              orig_granted_mode >>= 6;
++      else {
++              /* if likely wrong permissions, lock to user */
++              if (orig_granted_mode & 0007)
++                      orig_granted_mode = 0;
++              /* otherwise do a egid-only check */
++              else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
++                      orig_granted_mode >>= 3;
++              /* otherwise, no access */
++              else
++                      orig_granted_mode = 0;
++      }
++      if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
++          !ns_capable_noaudit(ns->user_ns, CAP_IPC_OWNER)) {
++              gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
++              return 0;
++      }
++#endif
++      return 1;
++}
+diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
+new file mode 100644
+index 0000000..84c44a0
+--- /dev/null
++++ b/grsecurity/grsec_link.c
+@@ -0,0 +1,65 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int gr_get_symlinkown_enabled(void)
++{
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid))
++              return 1;
++#endif
++      return 0;
++}
++
++int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
++{
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      const struct inode *link_inode = d_backing_inode(link->dentry);
++
++      if (target && !uid_eq(link_inode->i_uid, target->i_uid)) {
++              gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, GR_GLOBAL_UID(link_inode->i_uid), GR_GLOBAL_UID(target->i_uid));
++              return 1;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_follow_link(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++      struct inode *inode = d_backing_inode(dentry);
++      struct inode *parent = d_backing_inode(dentry->d_parent);
++      const struct cred *cred = current_cred();
++
++      if (grsec_enable_link && d_is_symlink(dentry) &&
++          (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
++          (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
++              gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, GR_GLOBAL_UID(inode->i_uid), GR_GLOBAL_GID(inode->i_gid));
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++                 const struct vfsmount *mnt,
++                 const struct filename *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++      struct inode *inode = d_backing_inode(dentry);
++      const struct cred *cred = current_cred();
++
++      if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
++          (!d_is_reg(dentry) || is_privileged_binary(dentry) || 
++           (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
++          !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
++              gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, GR_GLOBAL_UID(inode->i_uid), GR_GLOBAL_GID(inode->i_gid), to->name);
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
+new file mode 100644
+index 0000000..a24b338
+--- /dev/null
++++ b/grsecurity/grsec_log.c
+@@ -0,0 +1,340 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_TREE_PREEMPT_RCU
++#define DISABLE_PREEMPT() preempt_disable()
++#define ENABLE_PREEMPT() preempt_enable()
++#else
++#define DISABLE_PREEMPT()
++#define ENABLE_PREEMPT()
++#endif
++
++#define BEGIN_LOCKS(x) \
++      DISABLE_PREEMPT(); \
++      rcu_read_lock(); \
++      read_lock(&tasklist_lock); \
++      read_lock(&grsec_exec_file_lock); \
++      if (x != GR_DO_AUDIT) \
++              spin_lock(&grsec_alert_lock); \
++      else \
++              spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++      if (x != GR_DO_AUDIT) \
++              spin_unlock(&grsec_alert_lock); \
++      else \
++              spin_unlock(&grsec_audit_lock); \
++      read_unlock(&grsec_exec_file_lock); \
++      read_unlock(&tasklist_lock); \
++      rcu_read_unlock(); \
++      ENABLE_PREEMPT(); \
++      if (x == GR_DONT_AUDIT) \
++              gr_handle_alertkill(current)
++
++enum {
++      FLOODING,
++      NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
++static int gr_log_start(int audit)
++{
++      char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++      char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
++      unsigned long curr_secs = get_seconds();
++
++      if (audit == GR_DO_AUDIT)
++              goto set_fmt;
++
++      if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
++              grsec_alert_wtime = curr_secs;
++              grsec_alert_fyet = 0;
++      } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
++                  && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++              grsec_alert_fyet++;
++      } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++              grsec_alert_wtime = curr_secs;
++              grsec_alert_fyet++;
++              printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++              return FLOODING;
++      }
++      else return FLOODING;
++
++set_fmt:
++#endif
++      memset(buf, 0, PAGE_SIZE);
++      if (current->signal->curr_ip && gr_acl_is_enabled()) {
++              sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
++              snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++      } else if (current->signal->curr_ip) {
++              sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
++              snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
++      } else if (gr_acl_is_enabled()) {
++              sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++              snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++      } else {
++              sprintf(fmt, "%s%s", loglevel, "grsec: ");
++              strcpy(buf, fmt);
++      }
++
++      return NO_FLOODING;
++}
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++      __attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++      unsigned int len = strlen(buf);
++
++      vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++      return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++      __attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++      unsigned int len = strlen(buf);
++      va_list ap;
++
++      va_start(ap, msg);
++      vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++      va_end(ap);
++
++      return;
++}
++
++static void gr_log_end(int audit, int append_default)
++{
++      char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++      if (append_default) {
++              struct task_struct *task = current;
++              struct task_struct *parent = task->real_parent;
++              const struct cred *cred = __task_cred(task);
++              const struct cred *pcred = __task_cred(parent);
++              unsigned int len = strlen(buf);
++
++              snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++      }
++
++      printk("%s\n", buf);
++
++      return;
++}
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
++{
++      int logtype;
++      char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
++      char *str1 = NULL, *str2 = NULL, *str3 = NULL;
++      void *voidptr = NULL;
++      int num1 = 0, num2 = 0;
++      unsigned long ulong1 = 0, ulong2 = 0;
++      struct dentry *dentry = NULL;
++      struct vfsmount *mnt = NULL;
++      struct file *file = NULL;
++      struct task_struct *task = NULL;
++      struct vm_area_struct *vma = NULL;
++      const struct cred *cred, *pcred;
++      va_list ap;
++
++      BEGIN_LOCKS(audit);
++      logtype = gr_log_start(audit);
++      if (logtype == FLOODING) {
++              END_LOCKS(audit);
++              return;
++      }
++      va_start(ap, argtypes);
++      switch (argtypes) {
++      case GR_TTYSNIFF:
++              task = va_arg(ap, struct task_struct *);
++              gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
++              break;
++      case GR_SYSCTL_HIDDEN:
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, str1);
++              break;
++      case GR_RBAC:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
++              break;
++      case GR_RBAC_STR:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
++              break;
++      case GR_STR_RBAC:
++              str1 = va_arg(ap, char *);
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
++              break;
++      case GR_RBAC_MODE2:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              str2 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
++              break;
++      case GR_RBAC_MODE3:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              str2 = va_arg(ap, char *);
++              str3 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
++              break;
++      case GR_FILENAME:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
++              break;
++      case GR_STR_FILENAME:
++              str1 = va_arg(ap, char *);
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
++              break;
++      case GR_FILENAME_STR:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
++              break;
++      case GR_FILENAME_TWO_INT:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              num1 = va_arg(ap, int);
++              num2 = va_arg(ap, int);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
++              break;
++      case GR_FILENAME_TWO_INT_STR:
++              dentry = va_arg(ap, struct dentry *);
++              mnt = va_arg(ap, struct vfsmount *);
++              num1 = va_arg(ap, int);
++              num2 = va_arg(ap, int);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
++              break;
++      case GR_TEXTREL:
++              str1 = va_arg(ap, char *);
++              file = va_arg(ap, struct file *);
++              ulong1 = va_arg(ap, unsigned long);
++              ulong2 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, str1, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
++              break;
++      case GR_PTRACE:
++              task = va_arg(ap, struct task_struct *);
++              gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
++              break;
++      case GR_RESOURCE:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              ulong1 = va_arg(ap, unsigned long);
++              str1 = va_arg(ap, char *);
++              ulong2 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              break;
++      case GR_CAP:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              str1 = va_arg(ap, char *);
++              gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              break;
++      case GR_SIG:
++              str1 = va_arg(ap, char *);
++              voidptr = va_arg(ap, void *);
++              gr_log_middle_varargs(audit, msg, str1, voidptr);
++              break;
++      case GR_SIG2:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              num1 = va_arg(ap, int);
++              gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              break;
++      case GR_CRASH1:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              ulong1 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
++              break;
++      case GR_CRASH2:
++              task = va_arg(ap, struct task_struct *);
++              cred = __task_cred(task);
++              pcred = __task_cred(task->real_parent);
++              ulong1 = va_arg(ap, unsigned long);
++              gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
++              break;
++      case GR_RWXMAP:
++              file = va_arg(ap, struct file *);
++              gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
++              break;
++      case GR_RWXMAPVMA:
++              vma = va_arg(ap, struct vm_area_struct *);
++              if (vma->vm_file)
++                      str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
++              else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++                      str1 = "<stack>";
++              else if (vma->vm_start <= current->mm->brk &&
++                       vma->vm_end >= current->mm->start_brk)
++                      str1 = "<heap>";
++              else
++                      str1 = "<anonymous mapping>";
++              gr_log_middle_varargs(audit, msg, str1);
++              break;
++      case GR_PSACCT:
++              {
++                      unsigned int wday, cday;
++                      __u8 whr, chr;
++                      __u8 wmin, cmin;
++                      __u8 wsec, csec;
++
++                      task = va_arg(ap, struct task_struct *);
++                      wday = va_arg(ap, unsigned int);
++                      cday = va_arg(ap, unsigned int);
++                      whr = va_arg(ap, int);
++                      chr = va_arg(ap, int);
++                      wmin = va_arg(ap, int);
++                      cmin = va_arg(ap, int);
++                      wsec = va_arg(ap, int);
++                      csec = va_arg(ap, int);
++                      ulong1 = va_arg(ap, unsigned long);
++                      cred = __task_cred(task);
++                      pcred = __task_cred(task->real_parent);
++
++                      gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
++              }
++              break;
++      default:
++              gr_log_middle(audit, msg, ap);
++      }
++      va_end(ap);
++      // these don't need DEFAULTSECARGS printed on the end
++      if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
++              gr_log_end(audit, 0);
++      else
++              gr_log_end(audit, 1);
++      END_LOCKS(audit);
++}
+diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
+new file mode 100644
+index 0000000..0e39d8c
+--- /dev/null
++++ b/grsecurity/grsec_mem.c
+@@ -0,0 +1,48 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/module.h>
++#include <linux/grinternal.h>
++
++void gr_handle_msr_write(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
++      return;
++}
++EXPORT_SYMBOL_GPL(gr_handle_msr_write);
++
++void
++gr_handle_ioperm(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++      return;
++}
++
++void
++gr_handle_iopl(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++      return;
++}
++
++void
++gr_handle_mem_readwrite(u64 from, u64 to)
++{
++      gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
++      return;
++}
++
++void
++gr_handle_vm86(void)
++{
++      gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++      return;
++}
++
++void
++gr_log_badprocpid(const char *entry)
++{
++      gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
++      return;
++}
+diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
+new file mode 100644
+index 0000000..fe02bf4
+--- /dev/null
++++ b/grsecurity/grsec_mount.c
+@@ -0,0 +1,65 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mount.h>
++#include <linux/major.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      if (grsec_enable_mount && (retval >= 0))
++              gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++      return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      if (grsec_enable_mount && (retval >= 0))
++              gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++      return;
++}
++
++void
++gr_log_mount(const char *from, struct path *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      if (grsec_enable_mount && (retval >= 0))
++              gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
++#endif
++      return;
++}
++
++int
++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++      if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
++              gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
++              return -EPERM;
++      } else
++              return 0;
++#endif
++      return 0;
++}
++
++int
++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++      struct inode *inode = d_backing_inode(dentry);
++
++      if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
++          inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
++              gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
++              return -EPERM;
++      } else
++              return 0;
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
+new file mode 100644
+index 0000000..2ad7b96
+--- /dev/null
++++ b/grsecurity/grsec_pax.c
+@@ -0,0 +1,47 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma, bool is_textrel_rw)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG,
++                      is_textrel_rw ? "executable to writable" : "writable to executable",
++                      vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++      return;
++}
++
++void gr_log_ptgnustack(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
++#endif
++      return;
++}
++
++void
++gr_log_rwxmmap(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
++#endif
++      return;
++}
++
++void
++gr_log_rwxmprotect(struct vm_area_struct *vma)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      if (grsec_enable_log_rwxmaps)
++              gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
++#endif
++      return;
++}
+diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
+new file mode 100644
+index 0000000..2005a3a
+--- /dev/null
++++ b/grsecurity/grsec_proc.c
+@@ -0,0 +1,20 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int gr_proc_is_restricted(void)
++{
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      const struct cred *cred = current_cred();
++#endif
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
++              return -EACCES;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
++              return -EACCES;
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
+new file mode 100644
+index 0000000..304c518
+--- /dev/null
++++ b/grsecurity/grsec_ptrace.c
+@@ -0,0 +1,30 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/security.h>
++
++void
++gr_audit_ptrace(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++      if (grsec_enable_audit_ptrace)
++              gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
++#endif
++      return;
++}
++
++int
++gr_ptrace_readexec(struct file *file, int unsafe_flags)
++{
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++      const struct dentry *dentry = file->f_path.dentry;
++      const struct vfsmount *mnt = file->f_path.mnt;
++
++      if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && 
++          (inode_permission(d_backing_inode(dentry), MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
++              gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
+new file mode 100644
+index 0000000..f072c9d
+--- /dev/null
++++ b/grsecurity/grsec_sig.c
+@@ -0,0 +1,248 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/delay.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/hardirq.h>
++#include <asm/pgtable.h>
++
++char *signames[] = {
++      [SIGSEGV] = "Segmentation fault",
++      [SIGILL] = "Illegal instruction",
++      [SIGABRT] = "Abort",
++      [SIGBUS] = "Invalid alignment/Bus error"
++};
++
++void
++gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++      if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++                                  (sig == SIGABRT) || (sig == SIGBUS))) {
++              if (task_pid_nr(t) == task_pid_nr(current)) {
++                      gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
++              } else {
++                      gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
++              }
++      }
++#endif
++      return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++      /* ignore the 0 signal for protected task checks */
++      if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
++              gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
++              return -EPERM;
++      } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++              return -EPERM;
++      }
++#endif
++      return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int gr_fake_force_sig(int sig, struct task_struct *t)
++{
++      unsigned long int flags;
++      int ret, blocked, ignored;
++      struct k_sigaction *action;
++
++      spin_lock_irqsave(&t->sighand->siglock, flags);
++      action = &t->sighand->action[sig-1];
++      ignored = action->sa.sa_handler == SIG_IGN;
++      blocked = sigismember(&t->blocked, sig);
++      if (blocked || ignored) {
++              action->sa.sa_handler = SIG_DFL;
++              if (blocked) {
++                      sigdelset(&t->blocked, sig);
++                      recalc_sigpending_and_wake(t);
++              }
++      }
++      if (action->sa.sa_handler == SIG_DFL)
++              t->signal->flags &= ~SIGNAL_UNKILLABLE;
++      ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
++
++      spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++      return ret;
++}
++#endif
++
++#define GR_USER_BAN_TIME (15 * 60)
++#define GR_DAEMON_BRUTE_TIME (30 * 60)
++
++void gr_handle_brute_attach(int dumpable)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      struct task_struct *p = current;
++      kuid_t uid = GLOBAL_ROOT_UID;
++      int is_priv = 0;
++      int daemon = 0;
++
++      if (!grsec_enable_brute)
++              return;
++
++      if (is_privileged_binary(p->mm->exe_file->f_path.dentry))
++              is_priv = 1;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      read_lock(&grsec_exec_file_lock);
++      if (!is_priv && p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
++              p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
++              p->real_parent->brute = 1;
++              daemon = 1;
++      } else {
++              const struct cred *cred = __task_cred(p), *cred2;
++              struct task_struct *tsk, *tsk2;
++
++              if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
++                      struct user_struct *user;
++
++                      uid = cred->uid;
++
++                      /* this is put upon execution past expiration */
++                      user = find_user(uid);
++                      if (user == NULL)
++                              goto unlock;
++                      user->sugid_banned = 1;
++                      user->sugid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
++                      if (user->sugid_ban_expires == ~0UL)
++                              user->sugid_ban_expires--;
++
++                      /* only kill other threads of the same binary, from the same user */
++                      do_each_thread(tsk2, tsk) {
++                              cred2 = __task_cred(tsk);
++                              if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
++                                      gr_fake_force_sig(SIGKILL, tsk);
++                      } while_each_thread(tsk2, tsk);
++              }
++      }
++unlock:
++      read_unlock(&grsec_exec_file_lock);
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      if (gr_is_global_nonroot(uid))
++              gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
++      else if (daemon)
++              gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
++
++#endif
++      return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      struct task_struct *p = current;
++
++      if (unlikely(p->brute)) {
++              if (!grsec_enable_brute)
++                      p->brute = 0;
++              else if (time_before(get_seconds(), p->brute_expires))
++                      msleep(30 * 1000);
++      }
++#endif
++      return;
++}
++
++void gr_handle_kernel_exploit(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      static unsigned int num_banned_users __read_only;
++      const struct cred *cred;
++      struct task_struct *tsk, *tsk2;
++      struct user_struct *user;
++      kuid_t uid;
++
++      if (in_irq() || in_serving_softirq() || in_nmi())
++              panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
++
++      uid = current_uid();
++
++      if (gr_is_global_root(uid))
++              panic("grsec: halting the system due to suspicious kernel crash caused by root");
++      else {
++              pax_open_kernel();
++              num_banned_users++;
++              pax_close_kernel();
++              if (num_banned_users > 8)
++                      panic("grsec: halting the system due to suspicious kernel crash caused by a large number of different users");
++
++              /* kill all the processes of this user, hold a reference
++                 to their creds struct, and prevent them from creating
++                 another process until system reset
++              */
++              printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
++                      GR_GLOBAL_UID(uid));
++              /* we intentionally leak this ref */
++              user = get_uid(current->cred->user);
++              if (user)
++                      user->kernel_banned = 1;
++
++              /* kill all processes of this user */
++              read_lock(&tasklist_lock);
++              do_each_thread(tsk2, tsk) {
++                      cred = __task_cred(tsk);
++                      if (uid_eq(cred->uid, uid))
++                              gr_fake_force_sig(SIGKILL, tsk);
++              } while_each_thread(tsk2, tsk);
++              read_unlock(&tasklist_lock); 
++      }
++#endif
++}
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++static bool sugid_ban_expired(struct user_struct *user)
++{
++      if (user->sugid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->sugid_ban_expires)) {
++              user->sugid_banned = 0;
++              user->sugid_ban_expires = 0;
++              free_uid(user);
++              return true;
++      }
++
++      return false;
++}
++#endif
++
++int gr_process_kernel_exec_ban(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      if (unlikely(current->cred->user->kernel_banned))
++              return -EPERM;
++#endif
++      return 0;
++}
++
++int gr_process_kernel_setuid_ban(struct user_struct *user)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      if (unlikely(user->kernel_banned))
++              gr_fake_force_sig(SIGKILL, current);
++#endif
++      return 0;
++}
++
++int gr_process_sugid_exec_ban(const struct linux_binprm *bprm)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      struct user_struct *user = current->cred->user;
++      if (unlikely(user->sugid_banned)) {
++              if (sugid_ban_expired(user))
++                      return 0;
++              /* disallow execution of suid/sgid binaries only */
++              else if (is_privileged_binary(bprm->file->f_path.dentry))
++                      return -EPERM;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
+new file mode 100644
+index 0000000..3cdd946
+--- /dev/null
++++ b/grsecurity/grsec_sock.c
+@@ -0,0 +1,244 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
++
++EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
++EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
++EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
++EXPORT_SYMBOL_GPL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++      struct conn_table_entry *next;
++      struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++extern const char * gr_sockfamily_to_name(unsigned char family);
++
++static int 
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++      return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, 
++         __u16 sport, __u16 dport)
++{
++      if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++                   sig->gr_sport == sport && sig->gr_dport == dport))
++              return 1;
++      else
++              return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++      struct conn_table_entry **match;
++      unsigned int index;
++
++      index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
++                        sig->gr_sport, sig->gr_dport, 
++                        gr_conn_table_size);
++
++      newent->sig = sig;
++      
++      match = &gr_conn_table[index];
++      newent->next = *match;
++      *match = newent;
++
++      return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++      struct conn_table_entry *match, *last = NULL;
++      unsigned int index;
++
++      index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
++                        sig->gr_sport, sig->gr_dport, 
++                        gr_conn_table_size);
++
++      match = gr_conn_table[index];
++      while (match && !conn_match(match->sig, 
++              sig->gr_saddr, sig->gr_daddr, sig->gr_sport, 
++              sig->gr_dport)) {
++              last = match;
++              match = match->next;
++      }
++
++      if (match) {
++              if (last)
++                      last->next = match->next;
++              else
++                      gr_conn_table[index] = NULL;
++              kfree(match);
++      }
++
++      return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++                                           __u16 sport, __u16 dport)
++{
++      struct conn_table_entry *match;
++      unsigned int index;
++
++      index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++      match = gr_conn_table[index];
++      while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++              match = match->next;
++
++      if (match)
++              return match->sig;
++      else
++              return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++      struct signal_struct *sig = current->signal;
++      struct conn_table_entry *newent;
++
++      newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++      if (newent == NULL)
++              return;
++      /* no bh lock needed since we are called with bh disabled */
++      spin_lock(&gr_conn_table_lock);
++      gr_del_task_from_ip_table_nolock(sig);
++      sig->gr_saddr = inet->inet_rcv_saddr;
++      sig->gr_daddr = inet->inet_daddr;
++      sig->gr_sport = inet->inet_sport;
++      sig->gr_dport = inet->inet_dport;
++      gr_add_to_task_ip_table_nolock(sig, newent);
++      spin_unlock(&gr_conn_table_lock);
++#endif
++      return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++      spin_lock_bh(&gr_conn_table_lock);
++      gr_del_task_from_ip_table_nolock(task->signal);
++      spin_unlock_bh(&gr_conn_table_lock);
++#endif
++      return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++      struct signal_struct *p, *set;
++      const struct inet_sock *inet = inet_sk(sk);     
++
++      if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++              return;
++
++      set = current->signal;
++
++      spin_lock_bh(&gr_conn_table_lock);
++      p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
++                                  inet->inet_dport, inet->inet_sport);
++      if (unlikely(p != NULL)) {
++              set->curr_ip = p->curr_ip;
++              set->used_accept = 1;
++              gr_del_task_from_ip_table_nolock(p);
++              spin_unlock_bh(&gr_conn_table_lock);
++              return;
++      }
++      spin_unlock_bh(&gr_conn_table_lock);
++
++      set->curr_ip = inet->inet_daddr;
++      set->used_accept = 1;
++#endif
++      return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++      if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++          (family != AF_UNIX)) {
++              if (family == AF_INET)
++                      gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
++              else
++                      gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      if (grsec_enable_socket_server &&
++          in_group_p(grsec_socket_server_gid) &&
++          sck && (sck->sa_family != AF_UNIX) &&
++          (sck->sa_family != AF_LOCAL)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      if (grsec_enable_socket_server &&
++          in_group_p(grsec_socket_server_gid) &&
++          sck && (sck->sk_family != AF_UNIX) &&
++          (sck->sk_family != AF_LOCAL)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++      if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++          sck && (sck->sa_family != AF_UNIX) &&
++          (sck->sa_family != AF_LOCAL)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
+new file mode 100644
+index 0000000..4f673f8
+--- /dev/null
++++ b/grsecurity/grsec_sysctl.c
+@@ -0,0 +1,497 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++      if (dirname == NULL || name == NULL)
++              return 0;
++      if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
++              gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
++              return -EACCES;
++      }
++#endif
++      return 0;
++}
++
++#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
++static int __maybe_unused __read_only one = 1;
++#endif
++
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
++      defined(CONFIG_GRKERNSEC_DENYUSB)
++struct ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
++#ifdef CONFIG_GRKERNSEC_IO
++      {
++              .procname       = "disable_priv_io",
++              .data           = &grsec_disable_privio,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++      {
++              .procname       = "linking_restrictions",
++              .data           = &grsec_enable_link,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++      {
++              .procname       = "enforce_symlinksifowner",
++              .data           = &grsec_enable_symlinkown,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "symlinkown_gid",
++              .data           = &grsec_symlinkown_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      {
++              .procname       = "deter_bruteforce",
++              .data           = &grsec_enable_brute,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++      {
++              .procname       = "fifo_restrictions",
++              .data           = &grsec_enable_fifo,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++      {
++              .procname       = "ptrace_readexec",
++              .data           = &grsec_enable_ptrace_readexec,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++      {
++              .procname       = "consistent_setxid",
++              .data           = &grsec_enable_setxid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++      {
++              .procname       = "ip_blackhole",
++              .data           = &grsec_enable_blackhole,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "lastack_retries",
++              .data           = &grsec_lastack_retries,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++      {
++              .procname       = "exec_logging",
++              .data           = &grsec_enable_execlog,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++      {
++              .procname       = "rwxmap_logging",
++              .data           = &grsec_enable_log_rwxmaps,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++      {
++              .procname       = "signal_logging",
++              .data           = &grsec_enable_signal,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++      {
++              .procname       = "forkfail_logging",
++              .data           = &grsec_enable_forkfail,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++      {
++              .procname       = "timechange_logging",
++              .data           = &grsec_enable_time,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++      {
++              .procname       = "chroot_deny_shmat",
++              .data           = &grsec_enable_chroot_shmat,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++      {
++              .procname       = "chroot_deny_unix",
++              .data           = &grsec_enable_chroot_unix,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++      {
++              .procname       = "chroot_deny_mount",
++              .data           = &grsec_enable_chroot_mount,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++      {
++              .procname       = "chroot_deny_fchdir",
++              .data           = &grsec_enable_chroot_fchdir,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++      {
++              .procname       = "chroot_deny_chroot",
++              .data           = &grsec_enable_chroot_double,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++      {
++              .procname       = "chroot_deny_pivot",
++              .data           = &grsec_enable_chroot_pivot,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++      {
++              .procname       = "chroot_enforce_chdir",
++              .data           = &grsec_enable_chroot_chdir,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++      {
++              .procname       = "chroot_deny_chmod",
++              .data           = &grsec_enable_chroot_chmod,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++      {
++              .procname       = "chroot_deny_mknod",
++              .data           = &grsec_enable_chroot_mknod,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++      {
++              .procname       = "chroot_restrict_nice",
++              .data           = &grsec_enable_chroot_nice,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++      {
++              .procname       = "chroot_execlog",
++              .data           = &grsec_enable_chroot_execlog,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++      {
++              .procname       = "chroot_caps",
++              .data           = &grsec_enable_chroot_caps,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      {
++              .procname       = "chroot_deny_bad_rename",
++              .data           = &grsec_enable_chroot_rename,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++      {
++              .procname       = "chroot_deny_sysctl",
++              .data           = &grsec_enable_chroot_sysctl,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++      {
++              .procname       = "tpe",
++              .data           = &grsec_enable_tpe,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "tpe_gid",
++              .data           = &grsec_tpe_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++      {
++              .procname       = "tpe_invert",
++              .data           = &grsec_enable_tpe_invert,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++      {
++              .procname       = "tpe_restrict_all",
++              .data           = &grsec_enable_tpe_all,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++      {
++              .procname       = "socket_all",
++              .data           = &grsec_enable_socket_all,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "socket_all_gid",
++              .data           = &grsec_socket_all_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++      {
++              .procname       = "socket_client",
++              .data           = &grsec_enable_socket_client,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "socket_client_gid",
++              .data           = &grsec_socket_client_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++      {
++              .procname       = "socket_server",
++              .data           = &grsec_enable_socket_server,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "socket_server_gid",
++              .data           = &grsec_socket_server_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++      {
++              .procname       = "audit_group",
++              .data           = &grsec_enable_group,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++      {
++              .procname       = "audit_gid",
++              .data           = &grsec_audit_gid,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++      {
++              .procname       = "audit_chdir",
++              .data           = &grsec_enable_chdir,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++      {
++              .procname       = "audit_mount",
++              .data           = &grsec_enable_mount,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++      {
++              .procname       = "dmesg",
++              .data           = &grsec_enable_dmesg,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      {
++              .procname       = "chroot_findtask",
++              .data           = &grsec_enable_chroot_findtask,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++      {
++              .procname       = "resource_logging",
++              .data           = &grsec_resource_logging,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++      {
++              .procname       = "audit_ptrace",
++              .data           = &grsec_enable_audit_ptrace,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++      {
++              .procname       = "harden_ptrace",
++              .data           = &grsec_enable_harden_ptrace,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
++      {
++              .procname       = "harden_ipc",
++              .data           = &grsec_enable_harden_ipc,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_TTY
++      {
++              .procname       = "harden_tty",
++              .data           = &grsec_enable_harden_tty,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++      {
++              .procname       = "grsec_lock",
++              .data           = &grsec_lock,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++#ifdef CONFIG_GRKERNSEC_ROFS
++      {
++              .procname       = "romount_protect",
++              .data           = &grsec_enable_rofs,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_minmax_secure,
++              .extra1         = &one,
++              .extra2         = &one,
++      },
++#endif
++#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
++      {
++              .procname       = "deny_new_usb",
++              .data           = &grsec_deny_new_usb,
++              .maxlen         = sizeof(int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec_secure,
++      },
++#endif
++      { }
++};
++#endif
+diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
+new file mode 100644
+index 0000000..61b514e
+--- /dev/null
++++ b/grsecurity/grsec_time.c
+@@ -0,0 +1,16 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++      if (grsec_enable_time)
++              gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
++#endif
++      return;
++}
++
++EXPORT_SYMBOL_GPL(gr_log_timechange);
+diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
+new file mode 100644
+index 0000000..cbd2776
+--- /dev/null
++++ b/grsecurity/grsec_tpe.c
+@@ -0,0 +1,78 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++      struct inode *inode = d_backing_inode(file->f_path.dentry->d_parent);
++      struct inode *file_inode = d_backing_inode(file->f_path.dentry);
++      const struct cred *cred = current_cred();
++      char *msg = NULL;
++      char *msg2 = NULL;
++
++      // never restrict root
++      if (gr_is_global_root(cred->uid))
++              return 1;
++
++      if (grsec_enable_tpe) {
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++              if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
++                      msg = "not being in trusted group";
++              else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
++                      msg = "being in untrusted group";
++#else
++              if (in_group_p(grsec_tpe_gid))
++                      msg = "being in untrusted group";
++#endif
++      }
++      if (!msg && gr_acl_tpe_check())
++              msg = "being in untrusted role";
++
++      // not in any affected group/role
++      if (!msg)
++              goto next_check;
++
++      if (gr_is_global_nonroot(inode->i_uid))
++              msg2 = "file in non-root-owned directory";
++      else if (inode->i_mode & S_IWOTH)
++              msg2 = "file in world-writable directory";
++      else if ((inode->i_mode & S_IWGRP) && gr_is_global_nonroot_gid(inode->i_gid))
++              msg2 = "file in group-writable directory";
++      else if (file_inode->i_mode & S_IWOTH)
++              msg2 = "file is world-writable";
++
++      if (msg && msg2) {
++              char fullmsg[70] = {0};
++              snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
++              gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      }
++      msg = NULL;
++next_check:
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++      if (!grsec_enable_tpe || !grsec_enable_tpe_all)
++              return 1;
++
++      if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
++              msg = "directory not owned by user";
++      else if (inode->i_mode & S_IWOTH)
++              msg = "file in world-writable directory";
++      else if ((inode->i_mode & S_IWGRP) && gr_is_global_nonroot_gid(inode->i_gid))
++              msg = "file in group-writable directory";
++      else if (file_inode->i_mode & S_IWOTH)
++              msg = "file is world-writable";
++
++      if (msg) {
++              gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
++              return 0;
++      }
++#endif
++#endif
++      return 1;
++}
+diff --git a/grsecurity/grsec_tty.c b/grsecurity/grsec_tty.c
+new file mode 100644
+index 0000000..ad8b9c5
+--- /dev/null
++++ b/grsecurity/grsec_tty.c
+@@ -0,0 +1,18 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++#include <linux/tty.h>
++
++int gr_handle_tiocsti(struct tty_struct *tty)
++{
++#ifdef CONFIG_GRKERNSEC_HARDEN_TTY
++      if (grsec_enable_harden_tty && (current->signal->tty == tty) &&
++          !capable(CAP_SYS_ADMIN)) {
++              gr_log_noargs(GR_DONT_AUDIT, GR_TIOCSTI_MSG);
++              return 1;
++      }
++#endif
++      return 0;
++}
+diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
+new file mode 100644
+index 0000000..ae02d8e
+--- /dev/null
++++ b/grsecurity/grsec_usb.c
+@@ -0,0 +1,15 @@
++#include <linux/kernel.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++int gr_handle_new_usb(void)
++{
++#ifdef CONFIG_GRKERNSEC_DENYUSB
++      if (grsec_deny_new_usb) {
++              printk(KERN_ALERT "grsec: denied insert of new USB device\n");
++              return 1;
++      }
++#endif
++      return 0;
++}
++EXPORT_SYMBOL_GPL(gr_handle_new_usb);
+diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
+new file mode 100644
+index 0000000..1af1e63
+--- /dev/null
++++ b/grsecurity/grsum.c
+@@ -0,0 +1,56 @@
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++#include <crypto/algapi.h>
++#include <crypto/hash.h>
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++      struct crypto_ahash *tfm;
++      struct ahash_request *req;
++      struct scatterlist sg[2];
++      unsigned char temp_sum[GR_SHA_LEN];
++      unsigned long *tmpsumptr = (unsigned long *)temp_sum;
++      unsigned long *sumptr = (unsigned long *)sum;
++      int retval = 1;
++
++      tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
++      if (IS_ERR(tfm))
++              goto out_wipe;
++
++      sg_init_table(sg, 2);
++      sg_set_buf(&sg[0], salt, GR_SALT_LEN);
++      sg_set_buf(&sg[1], entry->pw, strlen((const char *)entry->pw));
++
++      req = ahash_request_alloc(tfm, GFP_KERNEL);
++      if (!req) {
++              crypto_free_ahash(tfm);
++              goto out_wipe;
++      }
++
++      ahash_request_set_callback(req, 0, NULL, NULL);
++      ahash_request_set_crypt(req, sg, temp_sum, GR_SALT_LEN + strlen((const char *)entry->pw));
++
++      if (crypto_ahash_digest(req))
++              goto out_free;
++
++      if (!crypto_memneq(sumptr, tmpsumptr, GR_SHA_LEN))
++              retval = 0;
++
++out_free:
++      ahash_request_free(req);
++      crypto_free_ahash(tfm);
++out_wipe:
++      memset(entry->pw, 0, GR_PW_LEN);
++
++      return retval;
++}
+diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
+index 562603d..7ee4475 100644
+--- a/include/acpi/acpiosxf.h
++++ b/include/acpi/acpiosxf.h
+@@ -337,11 +337,12 @@ acpi_status acpi_os_signal(u32 function, void *info);
+  * Debug print routines
+  */
+ #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf
++__printf(1, 2)
+ void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...);
+ #endif
+ #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_vprintf
+-void acpi_os_vprintf(const char *format, va_list args);
++__printf(1, 0) void acpi_os_vprintf(const char *format, va_list args);
+ #endif
+ #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 1ff3a76..c52f3b4 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -914,7 +914,7 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
+ /*
+  * Debug output
+  */
+-ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6)
++ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) __nocapture(3)
+                              void ACPI_INTERNAL_VAR_XFACE
+                              acpi_debug_print(u32 requested_debug_level,
+                                               u32 line_number,
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 720446c..f32baee 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -32,7 +32,7 @@ struct ghes_estatus_node {
+ struct ghes_estatus_cache {
+       u32 estatus_len;
+-      atomic_t count;
++      atomic_unchecked_t count;
+       struct acpi_hest_generic *generic;
+       unsigned long long time_in;
+       struct rcu_head rcu;
+diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
+index 5bdab6b..9ae82fe 100644
+--- a/include/asm-generic/4level-fixup.h
++++ b/include/asm-generic/4level-fixup.h
+@@ -14,8 +14,10 @@
+ #define pmd_alloc(mm, pud, address) \
+       ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+               NULL: pmd_offset(pud, address))
++#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
+ #define pud_alloc(mm, pgd, address)   (pgd)
++#define pud_alloc_kernel(mm, pgd, address)    pud_alloc((mm), (pgd), (address))
+ #define pud_offset(pgd, start)                (pgd)
+ #define pud_none(pud)                 0
+ #define pud_bad(pud)                  0
+diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
+index 288cc9e..714fd14 100644
+--- a/include/asm-generic/atomic-long.h
++++ b/include/asm-generic/atomic-long.h
+@@ -22,6 +22,12 @@
+ typedef atomic64_t atomic_long_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic64_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic64_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i)   ATOMIC64_INIT(i)
+ #define ATOMIC_LONG_PFX(x)    atomic64 ## x
+@@ -29,51 +35,61 @@ typedef atomic64_t atomic_long_t;
+ typedef atomic_t atomic_long_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i)   ATOMIC_INIT(i)
+ #define ATOMIC_LONG_PFX(x)    atomic ## x
+ #endif
+-#define ATOMIC_LONG_READ_OP(mo)                                               \
+-static inline long atomic_long_read##mo(const atomic_long_t *l)               \
++#define ATOMIC_LONG_READ_OP(mo, suffix)                                       \
++static inline long atomic_long_read##mo##suffix(const atomic_long##suffix##_t *l)\
+ {                                                                     \
+-      ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
++      ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\
+                                                                       \
+-      return (long)ATOMIC_LONG_PFX(_read##mo)(v);                     \
++      return (long)ATOMIC_LONG_PFX(_read##mo##suffix)(v);             \
+ }
+-ATOMIC_LONG_READ_OP()
+-ATOMIC_LONG_READ_OP(_acquire)
++ATOMIC_LONG_READ_OP(,)
++ATOMIC_LONG_READ_OP(,_unchecked)
++ATOMIC_LONG_READ_OP(_acquire,)
+ #undef ATOMIC_LONG_READ_OP
+-#define ATOMIC_LONG_SET_OP(mo)                                                \
+-static inline void atomic_long_set##mo(atomic_long_t *l, long i)      \
++#define ATOMIC_LONG_SET_OP(mo, suffix)                                        \
++static inline void atomic_long_set##mo##suffix(atomic_long##suffix##_t *l, long i)\
+ {                                                                     \
+-      ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
++      ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\
+                                                                       \
+-      ATOMIC_LONG_PFX(_set##mo)(v, i);                                \
++      ATOMIC_LONG_PFX(_set##mo##suffix)(v, i);                        \
+ }
+-ATOMIC_LONG_SET_OP()
+-ATOMIC_LONG_SET_OP(_release)
++ATOMIC_LONG_SET_OP(,)
++ATOMIC_LONG_SET_OP(,_unchecked)
++ATOMIC_LONG_SET_OP(_release,)
+ #undef ATOMIC_LONG_SET_OP
+-#define ATOMIC_LONG_ADD_SUB_OP(op, mo)                                        \
++#define ATOMIC_LONG_ADD_SUB_OP(op, mo, suffix)                                \
+ static inline long                                                    \
+-atomic_long_##op##_return##mo(long i, atomic_long_t *l)                       \
++atomic_long_##op##_return##mo##suffix(long i, atomic_long##suffix##_t *l)\
+ {                                                                     \
+-      ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
++      ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\
+                                                                       \
+-      return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v);         \
++      return (long)ATOMIC_LONG_PFX(_##op##_return##mo##suffix)(i, v); \
+ }
+-ATOMIC_LONG_ADD_SUB_OP(add,)
+-ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+-ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+-ATOMIC_LONG_ADD_SUB_OP(add, _release)
+-ATOMIC_LONG_ADD_SUB_OP(sub,)
+-ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+-ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+-ATOMIC_LONG_ADD_SUB_OP(sub, _release)
++ATOMIC_LONG_ADD_SUB_OP(add,,)
++ATOMIC_LONG_ADD_SUB_OP(add,,_unchecked)
++ATOMIC_LONG_ADD_SUB_OP(add, _relaxed,)
++ATOMIC_LONG_ADD_SUB_OP(add, _acquire,)
++ATOMIC_LONG_ADD_SUB_OP(add, _release,)
++ATOMIC_LONG_ADD_SUB_OP(sub,,)
++//ATOMIC_LONG_ADD_SUB_OP(sub,,_unchecked)
++ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed,)
++ATOMIC_LONG_ADD_SUB_OP(sub, _acquire,)
++ATOMIC_LONG_ADD_SUB_OP(sub, _release,)
+ #undef ATOMIC_LONG_ADD_SUB_OP
+@@ -98,6 +114,11 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+ #define atomic_long_xchg(v, new) \
+       (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
++#ifdef CONFIG_PAX_REFCOUNT
++#define atomic_long_xchg_unchecked(v, new) \
++      (ATOMIC_LONG_PFX(_xchg_unchecked)((ATOMIC_LONG_PFX(_unchecked_t) *)(v), (new)))
++#endif
++
+ static __always_inline void atomic_long_inc(atomic_long_t *l)
+ {
+       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+@@ -105,6 +126,15 @@ static __always_inline void atomic_long_inc(atomic_long_t *l)
+       ATOMIC_LONG_PFX(_inc)(v);
+ }
++#ifdef CONFIG_PAX_REFCOUNT
++static __always_inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++      ATOMIC_LONG_PFX(_unchecked_t) *v = (ATOMIC_LONG_PFX(_unchecked_t) *)l;
++
++      ATOMIC_LONG_PFX(_inc_unchecked)(v);
++}
++#endif
++
+ static __always_inline void atomic_long_dec(atomic_long_t *l)
+ {
+       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+@@ -168,21 +198,32 @@ ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
+ #undef ATOMIC_LONG_FETCH_INC_DEC_OP
+-#define ATOMIC_LONG_OP(op)                                            \
++#ifdef CONFIG_PAX_REFCOUNT
++static __always_inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++      ATOMIC_LONG_PFX(_unchecked_t) *v = (ATOMIC_LONG_PFX(_unchecked_t) *)l;
++
++      ATOMIC_LONG_PFX(_dec_unchecked)(v);
++}
++#endif
++
++#define ATOMIC_LONG_OP(op, suffix)                                    \
+ static __always_inline void                                           \
+-atomic_long_##op(long i, atomic_long_t *l)                            \
++atomic_long_##op##suffix(long i, atomic_long##suffix##_t *l)          \
+ {                                                                     \
+-      ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
++      ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\
+                                                                       \
+-      ATOMIC_LONG_PFX(_##op)(i, v);                                   \
++      ATOMIC_LONG_PFX(_##op##suffix)(i, v);                           \
+ }
+-ATOMIC_LONG_OP(add)
+-ATOMIC_LONG_OP(sub)
+-ATOMIC_LONG_OP(and)
+-ATOMIC_LONG_OP(andnot)
+-ATOMIC_LONG_OP(or)
+-ATOMIC_LONG_OP(xor)
++ATOMIC_LONG_OP(add,)
++ATOMIC_LONG_OP(add,_unchecked)
++ATOMIC_LONG_OP(sub,)
++ATOMIC_LONG_OP(sub,_unchecked)
++ATOMIC_LONG_OP(and,)
++ATOMIC_LONG_OP(andnot,)
++ATOMIC_LONG_OP(or,)
++ATOMIC_LONG_OP(xor,)
+ #undef ATOMIC_LONG_OP
+@@ -214,22 +255,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+       return ATOMIC_LONG_PFX(_add_negative)(i, v);
+ }
+-#define ATOMIC_LONG_INC_DEC_OP(op, mo)                                        \
++#define ATOMIC_LONG_INC_DEC_OP(op, mo, suffix)                                \
+ static inline long                                                    \
+-atomic_long_##op##_return##mo(atomic_long_t *l)                               \
++atomic_long_##op##_return##mo##suffix(atomic_long##suffix##_t *l)     \
+ {                                                                     \
+-      ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
++      ATOMIC_LONG_PFX(suffix##_t) *v = (ATOMIC_LONG_PFX(suffix##_t) *)l;\
+                                                                       \
+-      return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v);            \
++      return (long)ATOMIC_LONG_PFX(_##op##_return##mo##suffix)(v);    \
+ }
+-ATOMIC_LONG_INC_DEC_OP(inc,)
+-ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
+-ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
+-ATOMIC_LONG_INC_DEC_OP(inc, _release)
+-ATOMIC_LONG_INC_DEC_OP(dec,)
+-ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
+-ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
+-ATOMIC_LONG_INC_DEC_OP(dec, _release)
++ATOMIC_LONG_INC_DEC_OP(inc,,)
++ATOMIC_LONG_INC_DEC_OP(inc,,_unchecked)
++ATOMIC_LONG_INC_DEC_OP(inc, _relaxed,)
++ATOMIC_LONG_INC_DEC_OP(inc, _acquire,)
++ATOMIC_LONG_INC_DEC_OP(inc, _release,)
++ATOMIC_LONG_INC_DEC_OP(dec,,)
++ATOMIC_LONG_INC_DEC_OP(dec, _relaxed,)
++ATOMIC_LONG_INC_DEC_OP(dec, _acquire,)
++ATOMIC_LONG_INC_DEC_OP(dec, _release,)
+ #undef ATOMIC_LONG_INC_DEC_OP
+@@ -243,4 +285,62 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+ #define atomic_long_inc_not_zero(l) \
+       ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void pax_refcount_needs_these_functions(void)
++{
++      atomic_read_unchecked((atomic_unchecked_t *)NULL);
++      atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
++      atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
++      atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
++      atomic_inc_unchecked((atomic_unchecked_t *)NULL);
++      (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
++      atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
++      atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
++      atomic_dec_unchecked((atomic_unchecked_t *)NULL);
++      atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
++      (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++
++      atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
++      atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
++      atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
++      atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
++      atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
++      atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
++      atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
++      atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
++}
++#else
++#define atomic_read_unchecked(v) atomic_read(v)
++#define atomic_set_unchecked(v, i) atomic_set((v), (i))
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#ifndef atomic_inc_and_test_unchecked
++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#endif
++#ifndef atomic_inc_return_unchecked
++#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
++#endif
++#ifndef atomic_add_return_unchecked
++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
++#endif
++#define atomic_dec_unchecked(v) atomic_dec(v)
++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
++#ifndef atomic_xchg_unchecked
++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++#endif
++
++#define atomic_long_read_unchecked(v) atomic_long_read(v)
++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
++#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
++#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
++#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
++#ifndef atomic_long_xchg_unchecked
++#define atomic_long_xchg_unchecked(v, i) atomic_long_xchg((v), (i))
++#endif
++#endif
++
+ #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
+diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
+index dad68bf..cadcc641 100644
+--- a/include/asm-generic/atomic64.h
++++ b/include/asm-generic/atomic64.h
+@@ -16,6 +16,8 @@ typedef struct {
+       long long counter;
+ } atomic64_t;
++typedef atomic64_t atomic64_unchecked_t;
++
+ #define ATOMIC64_INIT(i)      { (i) }
+ extern long long atomic64_read(const atomic64_t *v);
+@@ -62,4 +64,15 @@ extern int   atomic64_add_unless(atomic64_t *v, long long a, long long u);
+ #define atomic64_dec_and_test(v)      (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v)      atomic64_add_unless((v), 1LL, 0LL)
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n))
++
+ #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
+diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
+index a60a7cc..0fe12f2 100644
+--- a/include/asm-generic/bitops/__fls.h
++++ b/include/asm-generic/bitops/__fls.h
+@@ -9,7 +9,7 @@
+  *
+  * Undefined if no set bit exists, so code should check against 0 first.
+  */
+-static __always_inline unsigned long __fls(unsigned long word)
++static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+       int num = BITS_PER_LONG - 1;
+diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
+index 0576d1f..dad6c71 100644
+--- a/include/asm-generic/bitops/fls.h
++++ b/include/asm-generic/bitops/fls.h
+@@ -9,7 +9,7 @@
+  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+  */
+-static __always_inline int fls(int x)
++static __always_inline int __intentional_overflow(-1) fls(int x)
+ {
+       int r = 32;
+diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
+index b097cf8..3d40e14 100644
+--- a/include/asm-generic/bitops/fls64.h
++++ b/include/asm-generic/bitops/fls64.h
+@@ -15,7 +15,7 @@
+  * at position 64.
+  */
+ #if BITS_PER_LONG == 32
+-static __always_inline int fls64(__u64 x)
++static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
+ {
+       __u32 h = x >> 32;
+       if (h)
+@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
+       return fls(x);
+ }
+ #elif BITS_PER_LONG == 64
+-static __always_inline int fls64(__u64 x)
++static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
+ {
+       if (x == 0)
+               return 0;
+diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
+index 6f96247..f6ae0d7 100644
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -62,13 +62,13 @@ struct bug_entry {
+  * to provide better diagnostics.
+  */
+ #ifndef __WARN_TAINT
+-extern __printf(3, 4)
++extern __printf(3, 4) __nocapture(1)
+ void warn_slowpath_fmt(const char *file, const int line,
+                      const char *fmt, ...);
+-extern __printf(4, 5)
++extern __printf(4, 5) __nocapture(1)
+ void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
+                            const char *fmt, ...);
+-extern void warn_slowpath_null(const char *file, const int line);
++extern __nocapture(1) void warn_slowpath_null(const char *file, const int line);
+ #define WANT_WARN_ON_SLOWPATH
+ #define __WARN()              warn_slowpath_null(__FILE__, __LINE__)
+ #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
+@@ -84,6 +84,7 @@ extern void warn_slowpath_null(const char *file, const int line);
+ /* used internally by panic.c */
+ struct warn_args;
++__nocapture(1, 0)
+ void __warn(const char *file, int line, void *caller, unsigned taint,
+           struct pt_regs *regs, struct warn_args *args);
+diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
+index 1bfcfe5..e04c5c9 100644
+--- a/include/asm-generic/cache.h
++++ b/include/asm-generic/cache.h
+@@ -6,7 +6,7 @@
+  * cache lines need to provide their own cache.h.
+  */
+-#define L1_CACHE_SHIFT                5
+-#define L1_CACHE_BYTES                (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT                5UL
++#define L1_CACHE_BYTES                (1UL << L1_CACHE_SHIFT)
+ #endif /* __ASM_GENERIC_CACHE_H */
+diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
+index 0d68a1e..b74a761 100644
+--- a/include/asm-generic/emergency-restart.h
++++ b/include/asm-generic/emergency-restart.h
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+ #define _ASM_GENERIC_EMERGENCY_RESTART_H
+-static inline void machine_emergency_restart(void)
++static inline __noreturn void machine_emergency_restart(void)
+ {
+       machine_restart(NULL);
+ }
+diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
+index 90f99c7..00ce236 100644
+--- a/include/asm-generic/kmap_types.h
++++ b/include/asm-generic/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define _ASM_GENERIC_KMAP_TYPES_H
+ #ifdef __WITH_KM_FENCE
+-# define KM_TYPE_NR 41
++# define KM_TYPE_NR 42
+ #else
+-# define KM_TYPE_NR 20
++# define KM_TYPE_NR 21
+ #endif
+ #endif
+diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
+index 9ceb03b..62b0b8f 100644
+--- a/include/asm-generic/local.h
++++ b/include/asm-generic/local.h
+@@ -23,24 +23,37 @@ typedef struct
+       atomic_long_t a;
+ } local_t;
++typedef struct {
++      atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l)       atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i)        atomic_long_set((&(l)->a),(i))
++#define local_set_unchecked(l,i)      atomic_long_set_unchecked((&(l)->a),(i))
+ #define local_inc(l)  atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l)        atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l)  atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l)        atomic_long_dec_unchecked(&(l)->a)
+ #define local_add(i,l)        atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l)      atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l)        atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l)      atomic_long_sub_unchecked((i),(&(l)->a))
+ #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+ #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+ #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+ #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+ #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
++#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
+ #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+ #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
++#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
+ #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
++#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+ #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+ #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
+ #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
+index 725612b..8458d85 100644
+--- a/include/asm-generic/pgtable-nopmd.h
++++ b/include/asm-generic/pgtable-nopmd.h
+@@ -1,14 +1,19 @@
+ #ifndef _PGTABLE_NOPMD_H
+ #define _PGTABLE_NOPMD_H
+-#ifndef __ASSEMBLY__
+-
+ #include <asm-generic/pgtable-nopud.h>
+-struct mm_struct;
+-
+ #define __PAGETABLE_PMD_FOLDED
++#define PMD_SHIFT     PUD_SHIFT
++#define PTRS_PER_PMD  1
++#define PMD_SIZE      (_AC(1,UL) << PMD_SHIFT)
++#define PMD_MASK      (~(PMD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
+ /*
+  * Having the pmd type consist of a pud gets the size right, and allows
+  * us to conceptually access the pud entry that this pmd is folded into
+@@ -16,11 +21,6 @@ struct mm_struct;
+  */
+ typedef struct { pud_t pud; } pmd_t;
+-#define PMD_SHIFT     PUD_SHIFT
+-#define PTRS_PER_PMD  1
+-#define PMD_SIZE      (1UL << PMD_SHIFT)
+-#define PMD_MASK      (~(PMD_SIZE-1))
+-
+ /*
+  * The "pud_xxx()" functions here are trivial for a folded two-level
+  * setup: the pmd is never bad, and a pmd always exists (as it's folded
+@@ -33,6 +33,7 @@ static inline void pud_clear(pud_t *pud)     { }
+ #define pmd_ERROR(pmd)                                (pud_ERROR((pmd).pud))
+ #define pud_populate(mm, pmd, pte)            do { } while (0)
++#define pud_populate_kernel(mm, pmd, pte)     do { } while (0)
+ /*
+  * (pmds are folded into puds so this doesn't get actually called,
+diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
+index 810431d..0ec4804f 100644
+--- a/include/asm-generic/pgtable-nopud.h
++++ b/include/asm-generic/pgtable-nopud.h
+@@ -1,10 +1,15 @@
+ #ifndef _PGTABLE_NOPUD_H
+ #define _PGTABLE_NOPUD_H
+-#ifndef __ASSEMBLY__
+-
+ #define __PAGETABLE_PUD_FOLDED
++#define PUD_SHIFT     PGDIR_SHIFT
++#define PTRS_PER_PUD  1
++#define PUD_SIZE      (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK      (~(PUD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
+ /*
+  * Having the pud type consist of a pgd gets the size right, and allows
+  * us to conceptually access the pgd entry that this pud is folded into
+@@ -12,11 +17,6 @@
+  */
+ typedef struct { pgd_t pgd; } pud_t;
+-#define PUD_SHIFT     PGDIR_SHIFT
+-#define PTRS_PER_PUD  1
+-#define PUD_SIZE      (1UL << PUD_SHIFT)
+-#define PUD_MASK      (~(PUD_SIZE-1))
+-
+ /*
+  * The "pgd_xxx()" functions here are trivial for a folded two-level
+  * setup: the pud is never bad, and a pud always exists (as it's folded
+@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd)     { }
+ #define pud_ERROR(pud)                                (pgd_ERROR((pud).pgd))
+ #define pgd_populate(mm, pgd, pud)            do { } while (0)
++#define pgd_populate_kernel(mm, pgd, pud)     do { } while (0)
+ /*
+  * (puds are folded into pgds so this doesn't get actually called,
+  * but the define is needed for a generic inline function.)
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index d4458b6..34e3f46 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -757,6 +757,22 @@ static inline int pmd_protnone(pmd_t pmd)
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
++#ifdef CONFIG_PAX_KERNEXEC
++#error KERNEXEC requires pax_open_kernel
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++#endif
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
++#ifdef CONFIG_PAX_KERNEXEC
++#error KERNEXEC requires pax_close_kernel
++#else
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++#endif
++
+ #endif /* CONFIG_MMU */
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
+index af0254c..a4e4da3 100644
+--- a/include/asm-generic/sections.h
++++ b/include/asm-generic/sections.h
+@@ -31,6 +31,7 @@ extern char _data[], _sdata[], _edata[];
+ extern char __bss_start[], __bss_stop[];
+ extern char __init_begin[], __init_end[];
+ extern char _sinittext[], _einittext[];
++extern char _sinitdata[], _einitdata[];
+ extern char _end[];
+ extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
+ extern char __kprobes_text_start[], __kprobes_text_end[];
+diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
+index 6df9b07..8b07d2ff 100644
+--- a/include/asm-generic/uaccess.h
++++ b/include/asm-generic/uaccess.h
+@@ -352,4 +352,20 @@ clear_user(void __user *to, unsigned long n)
+       return __clear_user(to, n);
+ }
++#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#error UDEREF requires pax_open_userland
++#else
++static inline unsigned long pax_open_userland(void) { return 0; }
++#endif
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#error UDEREF requires pax_close_userland
++#else
++static inline unsigned long pax_close_userland(void) { return 0; }
++#endif
++#endif
++
+ #endif /* __ASM_GENERIC_UACCESS_H */
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 2456397..85deae0 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -266,6 +266,7 @@
+               VMLINUX_SYMBOL(__start_rodata) = .;                     \
+               *(.rodata) *(.rodata.*)                                 \
+               RO_AFTER_INIT_DATA      /* Read only after init */      \
++              *(.data..read_only)                                     \
+               *(__vermagic)           /* Kernel version magic */      \
+               . = ALIGN(8);                                           \
+               VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
+@@ -434,9 +435,20 @@
+               ALIGN_FUNCTION();                                       \
+               *(.text.hot .text .text.fixup .text.unlikely)           \
+               *(.ref.text)                                            \
++              REFCOUNT_TEXT                                           \
+       MEM_KEEP(init.text)                                             \
+       MEM_KEEP(exit.text)                                             \
++#define __REFCOUNT_TEXT(section)                                      \
++              VMLINUX_SYMBOL(__##section##_start) = .;                \
++              *(.text.##section)                                      \
++              VMLINUX_SYMBOL(__##section##_end) = .;
++
++#define REFCOUNT_TEXT                                                 \
++      __REFCOUNT_TEXT(refcount_overflow)                              \
++      __REFCOUNT_TEXT(refcount64_overflow)                            \
++      __REFCOUNT_TEXT(refcount_underflow)                             \
++      __REFCOUNT_TEXT(refcount64_underflow)                           \
+ /* sched.text is aling to function alignment to secure we have same
+  * address even at second ld pass when generating System.map */
+@@ -531,7 +543,9 @@
+       MEM_DISCARD(init.data)                                          \
+       KERNEL_CTORS()                                                  \
+       MCOUNT_REC()                                                    \
++      *(.init.rodata.str)                                             \
+       *(.init.rodata)                                                 \
++      *(.init.rodata.*)                                               \
+       FTRACE_EVENTS()                                                 \
+       TRACE_SYSCALLS()                                                \
+       KPROBE_BLACKLIST()                                              \
+@@ -555,9 +569,12 @@
+ #define EXIT_DATA                                                     \
+       *(.exit.data)                                                   \
++      *(.exit.rodata)                                                 \
++      *(.exit.rodata.*)                                               \
+       *(.fini_array)                                                  \
+       *(.dtors)                                                       \
+       MEM_DISCARD(exit.data)                                          \
++      *(.exit.rodata.str)                                             \
+       MEM_DISCARD(exit.rodata)
+ #define EXIT_TEXT                                                     \
+@@ -774,17 +791,18 @@
+  * section in the linker script will go there too.  @phdr should have
+  * a leading colon.
+  *
+- * Note that this macros defines __per_cpu_load as an absolute symbol.
++ * Note that this macros defines per_cpu_load as an absolute symbol.
+  * If there is no need to put the percpu section at a predetermined
+  * address, use PERCPU_SECTION.
+  */
+ #define PERCPU_VADDR(cacheline, vaddr, phdr)                          \
+-      VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
+-      .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
++      per_cpu_load = .;                                               \
++      .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load)           \
+                               - LOAD_OFFSET) {                        \
++              VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load;      \
+               PERCPU_INPUT(cacheline)                                 \
+       } phdr                                                          \
+-      . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
++      . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
+ /**
+  * PERCPU_SECTION - define output section for percpu area, simple version
+@@ -846,12 +864,14 @@
+ #define INIT_DATA_SECTION(initsetup_align)                            \
+       .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {               \
++              VMLINUX_SYMBOL(_sinitdata) = .;                         \
+               INIT_DATA                                               \
+               INIT_SETUP(initsetup_align)                             \
+               INIT_CALLS                                              \
+               CON_INITCALL                                            \
+               SECURITY_INITCALL                                       \
+               INIT_RAM_FS                                             \
++              VMLINUX_SYMBOL(_einitdata) = .;                         \
+       }
+ #define BSS_SECTION(sbss_align, bss_align, stop_align)                        \
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 8637cdf..1907623 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -38,7 +38,7 @@ struct crypto_type {
+       unsigned int maskclear;
+       unsigned int maskset;
+       unsigned int tfmsize;
+-};
++} __do_const;
+ struct crypto_instance {
+       struct crypto_alg alg;
+diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h
+index 32b60eb..1a592df 100644
+--- a/include/crypto/cast6.h
++++ b/include/crypto/cast6.h
+@@ -18,7 +18,7 @@ int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
+                  unsigned int keylen, u32 *flags);
+ int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+-void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+-void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
++void __cast6_encrypt(void *ctx, u8 *dst, const u8 *src);
++void __cast6_decrypt(void *ctx, u8 *dst, const u8 *src);
+ #endif
+diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
+index b7e0941..1a1f67f 100644
+--- a/include/crypto/serpent.h
++++ b/include/crypto/serpent.h
+@@ -21,7 +21,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
+                    unsigned int keylen);
+ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
++void __serpent_encrypt(void *ctx, u8 *dst, const u8 *src);
++void __serpent_decrypt(void *ctx, u8 *dst, const u8 *src);
+ #endif
+diff --git a/include/crypto/xts.h b/include/crypto/xts.h
+index ede6b97..1f5b11f 100644
+--- a/include/crypto/xts.h
++++ b/include/crypto/xts.h
+@@ -21,7 +21,7 @@ struct xts_crypt_req {
+       void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
+ };
+-#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
++#define XTS_TWEAK_CAST(x) (x)
+ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+             struct scatterlist *src, unsigned int nbytes,
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 988903a..88e6883 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -60,6 +60,7 @@
+ #include <asm/mman.h>
+ #include <asm/pgalloc.h>
++#include <asm/local.h>
+ #include <asm/uaccess.h>
+ #include <uapi/drm/drm.h>
+@@ -134,7 +135,7 @@ struct dma_buf_attachment;
+ #define DRM_UT_ATOMIC         0x10
+ #define DRM_UT_VBL            0x20
+-extern __printf(2, 3)
++extern __printf(2, 3) __nocapture(1)
+ void drm_ut_debug_printk(const char *function_name,
+                        const char *format, ...);
+ extern __printf(1, 2)
+@@ -247,10 +248,12 @@ void drm_err(const char *format, ...);
+  * \param cmd command.
+  * \param arg argument.
+  */
+-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
++typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
++typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
+                              unsigned long arg);
+ #define DRM_IOCTL_NR(n)                _IOC_NR(n)
+@@ -266,9 +269,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ struct drm_ioctl_desc {
+       unsigned int cmd;
+       int flags;
+-      drm_ioctl_t *func;
++      drm_ioctl_t func;
+       const char *name;
+-};
++} __do_const;
+ /**
+  * Creates a driver or general drm_ioctl_desc array entry for the given
+@@ -639,7 +642,8 @@ struct drm_driver {
+       /* List of devices hanging off this driver with stealth attach. */
+       struct list_head legacy_dev_list;
+-};
++} __do_const;
++typedef struct drm_driver __no_const drm_driver_no_const;
+ enum drm_minor_type {
+       DRM_MINOR_LEGACY,
+@@ -657,7 +661,8 @@ struct drm_info_list {
+       int (*show)(struct seq_file*, void*); /** show callback */
+       u32 driver_features; /**< Required driver features for this entry */
+       void *data;
+-};
++} __do_const;
++typedef struct drm_info_list __no_const drm_info_list_no_const;
+ /**
+  * debugfs node structure. This structure represents a debugfs file.
+@@ -718,7 +723,7 @@ struct drm_device {
+       /** \name Usage Counters */
+       /*@{ */
+-      int open_count;                 /**< Outstanding files open, protected by drm_global_mutex. */
++      local_t open_count;             /**< Outstanding files open, protected by drm_global_mutex. */
+       spinlock_t buf_lock;            /**< For drm_device::buf_use and a few other things. */
+       int buf_use;                    /**< Buffers in use -- cannot alloc */
+       atomic_t buf_alloc;             /**< Buffer allocation in progress */
+diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
+index fc65118..7d80068 100644
+--- a/include/drm/drm_mm.h
++++ b/include/drm/drm_mm.h
+@@ -291,7 +291,7 @@ void drm_mm_remove_node(struct drm_mm_node *node);
+ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+ void drm_mm_init(struct drm_mm *mm,
+                u64 start,
+-               u64 size);
++               u64 size) __intentional_overflow(3);
+ void drm_mm_takedown(struct drm_mm *mm);
+ bool drm_mm_clean(struct drm_mm *mm);
+diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
+index b55f218..0fe15f5 100644
+--- a/include/drm/drm_modeset_helper_vtables.h
++++ b/include/drm/drm_modeset_helper_vtables.h
+@@ -638,7 +638,7 @@ struct drm_encoder_helper_funcs {
+       int (*atomic_check)(struct drm_encoder *encoder,
+                           struct drm_crtc_state *crtc_state,
+                           struct drm_connector_state *conn_state);
+-};
++} __no_const;
+ /**
+  * drm_encoder_helper_add - sets the helper vtable for an encoder
+@@ -778,6 +778,7 @@ struct drm_connector_helper_funcs {
+       struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+                                                  struct drm_connector_state *connector_state);
+ };
++typedef struct drm_connector_helper_funcs __no_const drm_connector_helper_funcs_no_const;
+ /**
+  * drm_connector_helper_add - sets the helper vtable for a connector
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index 33466bf..3c53007 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -37,7 +37,7 @@
+  */
+ #define INTEL_VGA_DEVICE(id, info) {          \
+       0x8086, id,                             \
+-      ~0, ~0,                                 \
++      PCI_ANY_ID, PCI_ANY_ID,                 \
+       0x030000, 0xff0000,                     \
+       (unsigned long) info }
+diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
+index f49edec..e47b019 100644
+--- a/include/drm/intel-gtt.h
++++ b/include/drm/intel-gtt.h
+@@ -3,8 +3,8 @@
+ #ifndef _DRM_INTEL_GTT_H
+ #define       _DRM_INTEL_GTT_H
+-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+-                 phys_addr_t *mappable_base, u64 *mappable_end);
++void intel_gtt_get(u64 *gtt_total, u64 *stolen_size,
++                 u64 *mappable_base, u64 *mappable_end);
+ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+                    struct agp_bridge_data *bridge);
+diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
+index 72dcbe8..8db58d7 100644
+--- a/include/drm/ttm/ttm_memory.h
++++ b/include/drm/ttm/ttm_memory.h
+@@ -48,7 +48,7 @@
+ struct ttm_mem_shrink {
+       int (*do_shrink) (struct ttm_mem_shrink *);
+-};
++} __no_const;
+ /**
+  * struct ttm_mem_global - Global memory accounting structure.
+diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
+index 49a8284..9643967 100644
+--- a/include/drm/ttm/ttm_page_alloc.h
++++ b/include/drm/ttm/ttm_page_alloc.h
+@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
+  */
+ extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
++struct device;
+ extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+ extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
+index 2480469..afcbfd4 100644
+--- a/include/keys/asymmetric-subtype.h
++++ b/include/keys/asymmetric-subtype.h
+@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
+       /* Verify the signature on a key of this subtype (optional) */
+       int (*verify_signature)(const struct key *key,
+                               const struct public_key_signature *sig);
+-};
++} __do_const;
+ /**
+  * asymmetric_key_subtype - Get the subtype from an asymmetric key
+diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h
+index 1d45413..377bc27 100644
+--- a/include/keys/encrypted-type.h
++++ b/include/keys/encrypted-type.h
+@@ -15,7 +15,7 @@
+ #ifndef _KEYS_ENCRYPTED_TYPE_H
+ #define _KEYS_ENCRYPTED_TYPE_H
+-#include <linux/key.h>
++#include <linux/key-type.h>
+ #include <linux/rcupdate.h>
+ struct encrypted_key_payload {
+diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
+index 5de0673..5e8f2c5 100644
+--- a/include/keys/rxrpc-type.h
++++ b/include/keys/rxrpc-type.h
+@@ -12,7 +12,7 @@
+ #ifndef _KEYS_RXRPC_TYPE_H
+ #define _KEYS_RXRPC_TYPE_H
+-#include <linux/key.h>
++#include <linux/key-type.h>
+ /*
+  * key type for AF_RXRPC keys
+diff --git a/include/keys/user-type.h b/include/keys/user-type.h
+index c56fef4..c9ebdc7 100644
+--- a/include/keys/user-type.h
++++ b/include/keys/user-type.h
+@@ -12,7 +12,7 @@
+ #ifndef _KEYS_USER_TYPE_H
+ #define _KEYS_USER_TYPE_H
+-#include <linux/key.h>
++#include <linux/key-type.h>
+ #include <linux/rcupdate.h>
+ #ifdef CONFIG_KEYS
+diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
+index c1da539..1dcec55 100644
+--- a/include/linux/atmdev.h
++++ b/include/linux/atmdev.h
+@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
+ #endif
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+       __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
+       int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
+       int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
+       struct module *owner;
+-};
++} __do_const ;
+ struct atmphy_ops {
+       int (*start)(struct atm_dev *dev);
+diff --git a/include/linux/atomic.h b/include/linux/atomic.h
+index e71835b..957f2d6 100644
+--- a/include/linux/atomic.h
++++ b/include/linux/atomic.h
+@@ -72,6 +72,7 @@
+ #define  atomic_add_return_relaxed    atomic_add_return
+ #define  atomic_add_return_acquire    atomic_add_return
+ #define  atomic_add_return_release    atomic_add_return
++#define atomic_add_return_unchecked_relaxed   atomic_add_return_unchecked
+ #else /* atomic_add_return_relaxed */
+@@ -89,6 +90,11 @@
+ #define  atomic_add_return(...)                                               \
+       __atomic_op_fence(atomic_add_return, __VA_ARGS__)
+ #endif
++
++#ifndef atomic_add_return_unchecked
++#define  atomic_add_return_unchecked(...)                             \
++      __atomic_op_fence(atomic_add_return_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic_add_return_relaxed */
+ /* atomic_inc_return_relaxed */
+@@ -113,6 +119,11 @@
+ #define  atomic_inc_return(...)                                               \
+       __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
+ #endif
++
++#ifndef atomic_inc_return_unchecked
++#define  atomic_inc_return_unchecked(...)                             \
++      __atomic_op_fence(atomic_inc_return_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic_inc_return_relaxed */
+ /* atomic_sub_return_relaxed */
+@@ -490,6 +501,10 @@
+ #ifndef xchg
+ #define  xchg(...)                    __atomic_op_fence(xchg, __VA_ARGS__)
+ #endif
++
++#ifndef xchg_unchecked
++#define  xchg_unchecked(...)          __atomic_op_fence(xchg_unchecked, __VA_ARGS__)
++#endif
+ #endif /* xchg_relaxed */
+ /**
+@@ -501,7 +516,7 @@
+  * Atomically adds @a to @v, so long as @v was not already @u.
+  * Returns non-zero if @v was not @u, and zero otherwise.
+  */
+-static inline int atomic_add_unless(atomic_t *v, int a, int u)
++static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
+ {
+       return __atomic_add_unless(v, a, u) != u;
+ }
+@@ -618,7 +633,7 @@ static inline int atomic_dec_if_positive(atomic_t *v)
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+-              old = atomic_cmpxchg((v), c, dec);
++              old = atomic_cmpxchg(v, c, dec);
+               if (likely(old == c))
+                       break;
+               c = old;
+@@ -661,6 +676,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
+ #define  atomic64_add_return(...)                                     \
+       __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_add_return_unchecked
++#define  atomic64_add_return_unchecked(...)                           \
++      __atomic_op_fence(atomic64_add_return_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_add_return_relaxed */
+ /* atomic64_inc_return_relaxed */
+@@ -685,6 +705,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
+ #define  atomic64_inc_return(...)                                     \
+       __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_inc_return_unchecked
++#define  atomic64_inc_return_unchecked(...)                           \
++      __atomic_op_fence(atomic64_inc_return_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_inc_return_relaxed */
+@@ -970,6 +995,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
+ #define  atomic64_xchg(...)                                           \
+       __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_xchg_unchecked
++#define  atomic64_xchg_unchecked(...)                                 \
++      __atomic_op_fence(atomic64_xchg_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_xchg_relaxed */
+ /* atomic64_cmpxchg_relaxed */
+@@ -994,6 +1024,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
+ #define  atomic64_cmpxchg(...)                                                \
+       __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_cmpxchg_unchecked
++#define  atomic64_cmpxchg_unchecked(...)                              \
++      __atomic_op_fence(atomic64_cmpxchg_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_cmpxchg_relaxed */
+ #ifndef atomic64_andnot
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 9d4443f..b0b3fef 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -135,7 +135,7 @@ extern void                    audit_log_n_hex(struct audit_buffer *ab,
+                                         size_t len);
+ extern void               audit_log_n_string(struct audit_buffer *ab,
+                                              const char *buf,
+-                                             size_t n);
++                                             size_t n) __nocapture(2);
+ extern void               audit_log_n_untrustedstring(struct audit_buffer *ab,
+                                                       const char *string,
+                                                       size_t n);
+@@ -333,7 +333,7 @@ static inline void audit_ptrace(struct task_struct *t)
+ extern unsigned int audit_serial(void);
+ extern int auditsc_get_stamp(struct audit_context *ctx,
+                             struct timespec *t, unsigned int *serial);
+-extern int audit_set_loginuid(kuid_t loginuid);
++extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
+ static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+ {
+@@ -552,7 +552,8 @@ static inline bool audit_loginuid_set(struct task_struct *tsk)
+       return uid_valid(audit_get_loginuid(tsk));
+ }
+-static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
++static inline __nocapture(2)
++void audit_log_string(struct audit_buffer *ab, const char *buf)
+ {
+       audit_log_n_string(ab, buf, strlen(buf));
+ }
+diff --git a/include/linux/average.h b/include/linux/average.h
+index d04aa58..3de0da8 100644
+--- a/include/linux/average.h
++++ b/include/linux/average.h
+@@ -36,7 +36,7 @@
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+                                                                       \
+-              ACCESS_ONCE(e->internal) = internal ?                   \
++              ACCESS_ONCE_RW(e->internal) = internal ?                \
+                       (((internal << weight) - internal) +            \
+                               (val << factor)) >> weight :            \
+                       (val << factor);                                \
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index 1303b57..c8196d8 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -44,7 +44,7 @@ struct linux_binprm {
+       unsigned interp_flags;
+       unsigned interp_data;
+       unsigned long loader, exec;
+-};
++} __randomize_layout;
+ #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
+ #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
+@@ -78,8 +78,10 @@ struct linux_binfmt {
+       int (*load_binary)(struct linux_binprm *);
+       int (*load_shlib)(struct file *);
+       int (*core_dump)(struct coredump_params *cprm);
++      void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
++      void (*handle_mmap)(struct file *);
+       unsigned long min_coredump;     /* minimal dump size */
+-};
++} __do_const __randomize_layout;
+ extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 23ddf4b..9115ce0 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -354,7 +354,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,
+ #endif /* CONFIG_BLK_DEV_INTEGRITY */
+ extern void bio_trim(struct bio *bio, int offset, int size);
+-extern struct bio *bio_split(struct bio *bio, int sectors,
++extern struct bio *bio_split(struct bio *bio, unsigned int sectors,
+                            gfp_t gfp, struct bio_set *bs);
+ /**
+@@ -367,7 +367,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
+  * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+  * than @sectors, returns the original bio unchanged.
+  */
+-static inline struct bio *bio_next_split(struct bio *bio, int sectors,
++static inline struct bio *bio_next_split(struct bio *bio, unsigned int sectors,
+                                        gfp_t gfp, struct bio_set *bs)
+ {
+       if (sectors >= bio_sectors(bio))
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 598bc99..bb8f339f 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -308,7 +308,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+       return find_first_zero_bit(src, nbits) == nbits;
+ }
+-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
++static __always_inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
+ {
+       if (small_const_nbits(nbits))
+               return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 299e76b..ef972c1 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -75,7 +75,7 @@ static inline int get_count_order(unsigned int count)
+       return order;
+ }
+-static __always_inline unsigned long hweight_long(unsigned long w)
++static __always_inline unsigned long __intentional_overflow(-1) hweight_long(unsigned long w)
+ {
+       return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ }
+@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
+  * @word: value to rotate
+  * @shift: bits to roll
+  */
+-static inline __u32 rol32(__u32 word, unsigned int shift)
++static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
+ {
+       return (word << shift) | (word >> ((-shift) & 31));
+ }
+@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
+  * @word: value to rotate
+  * @shift: bits to roll
+  */
+-static inline __u32 ror32(__u32 word, unsigned int shift)
++static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
+ {
+       return (word >> shift) | (word << (32 - shift));
+ }
+@@ -184,7 +184,7 @@ static inline __s64 sign_extend64(__u64 value, int index)
+       return (__s64)(value << shift) >> shift;
+ }
+-static inline unsigned fls_long(unsigned long l)
++static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
+ {
+       if (sizeof(l) == 4)
+               return fls(l);
+diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
+index 10648e3..a230bec 100644
+--- a/include/linux/blk-cgroup.h
++++ b/include/linux/blk-cgroup.h
+@@ -63,12 +63,12 @@ struct blkcg {
+  */
+ struct blkg_stat {
+       struct percpu_counter           cpu_cnt;
+-      atomic64_t                      aux_cnt;
++      atomic64_unchecked_t            aux_cnt;
+ };
+ struct blkg_rwstat {
+       struct percpu_counter           cpu_cnt[BLKG_RWSTAT_NR];
+-      atomic64_t                      aux_cnt[BLKG_RWSTAT_NR];
++      atomic64_unchecked_t            aux_cnt[BLKG_RWSTAT_NR];
+ };
+ /*
+@@ -508,7 +508,7 @@ static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
+       if (ret)
+               return ret;
+-      atomic64_set(&stat->aux_cnt, 0);
++      atomic64_set_unchecked(&stat->aux_cnt, 0);
+       return 0;
+ }
+@@ -546,7 +546,7 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+ static inline void blkg_stat_reset(struct blkg_stat *stat)
+ {
+       percpu_counter_set(&stat->cpu_cnt, 0);
+-      atomic64_set(&stat->aux_cnt, 0);
++      atomic64_set_unchecked(&stat->aux_cnt, 0);
+ }
+ /**
+@@ -559,7 +559,7 @@ static inline void blkg_stat_reset(struct blkg_stat *stat)
+ static inline void blkg_stat_add_aux(struct blkg_stat *to,
+                                    struct blkg_stat *from)
+ {
+-      atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
++      atomic64_add_unchecked(blkg_stat_read(from) + atomic64_read_unchecked(&from->aux_cnt),
+                    &to->aux_cnt);
+ }
+@@ -574,7 +574,7 @@ static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
+                               percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+                       return ret;
+               }
+-              atomic64_set(&rwstat->aux_cnt[i], 0);
++              atomic64_set_unchecked(&rwstat->aux_cnt[i], 0);
+       }
+       return 0;
+ }
+@@ -629,7 +629,7 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+       int i;
+       for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-              atomic64_set(&result.aux_cnt[i],
++              atomic64_set_unchecked(&result.aux_cnt[i],
+                            percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
+       return result;
+ }
+@@ -646,8 +646,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+ {
+       struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+-      return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+-              atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
++      return atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
++              atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+ }
+ /**
+@@ -660,7 +660,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+       for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+               percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+-              atomic64_set(&rwstat->aux_cnt[i], 0);
++              atomic64_set_unchecked(&rwstat->aux_cnt[i], 0);
+       }
+ }
+@@ -678,8 +678,8 @@ static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+       int i;
+       for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-              atomic64_add(atomic64_read(&v.aux_cnt[i]) +
+-                           atomic64_read(&from->aux_cnt[i]),
++              atomic64_add_unchecked(atomic64_read_unchecked(&v.aux_cnt[i]) +
++                           atomic64_read_unchecked(&from->aux_cnt[i]),
+                            &to->aux_cnt[i]);
+ }
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index e79055c..262f1ba 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1690,7 +1690,7 @@ struct block_device_operations {
+       void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+       struct module *owner;
+       const struct pr_ops *pr_ops;
+-};
++} __do_const;
+ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+                                unsigned long);
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index cceb72f..c9f287a 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -25,7 +25,7 @@ struct blk_trace {
+       struct dentry *dropped_file;
+       struct dentry *msg_file;
+       struct list_head running_list;
+-      atomic_t dropped;
++      atomic_unchecked_t dropped;
+ };
+ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+diff --git a/include/linux/cache.h b/include/linux/cache.h
+index 1be04f8..9c2d3e2 100644
+--- a/include/linux/cache.h
++++ b/include/linux/cache.h
+@@ -26,6 +26,15 @@
+  * after mark_rodata_ro() has been called). These are effectively read-only,
+  * but may get written to during init, so can't live in .rodata (via "const").
+  */
++#ifdef CONFIG_PAX_KERNEXEC
++# ifdef __ro_after_init
++#  error KERNEXEC requires __read_only
++# endif
++# define __read_only __attribute__((__section__(".data..read_only")))
++#else
++# define __read_only __read_mostly
++#endif
++
+ #ifndef __ro_after_init
+ #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+ #endif
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index dbc21c7..5b432a7 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -231,6 +231,10 @@ static inline bool capable(int cap)
+ {
+       return true;
+ }
++static inline bool capable_nolog(int cap)
++{
++      return true;
++}
+ static inline bool ns_capable(struct user_namespace *ns, int cap)
+ {
+       return true;
+@@ -241,9 +245,13 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
+ }
+ #endif /* CONFIG_MULTIUSER */
+ extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
++extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
+ extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
++extern bool capable_nolog(int cap);
+ /* audit system wants to get cap info from files as well */
+ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
++extern int is_privileged_binary(const struct dentry *dentry);
++
+ #endif /* !_LINUX_CAPABILITY_H */
+diff --git a/include/linux/cdev.h b/include/linux/cdev.h
+index f876361..7c05fd9dd 100644
+--- a/include/linux/cdev.h
++++ b/include/linux/cdev.h
+@@ -16,7 +16,7 @@ struct cdev {
+       struct list_head list;
+       dev_t dev;
+       unsigned int count;
+-};
++} __randomize_layout;
+ void cdev_init(struct cdev *, const struct file_operations *);
+diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
+index 8609d57..86e4d79 100644
+--- a/include/linux/cdrom.h
++++ b/include/linux/cdrom.h
+@@ -87,7 +87,6 @@ struct cdrom_device_ops {
+ /* driver specifications */
+       const int capability;   /* capability flags */
+-      int n_minors;           /* number of active minor devices */
+       /* handle uniform packets for scsi type devices (scsi,atapi) */
+       int (*generic_packet) (struct cdrom_device_info *,
+                              struct packet_command *);
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 5b17de6..d75785b 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -427,7 +427,7 @@ struct cftype {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lock_class_key   lockdep_key;
+ #endif
+-};
++} __do_const;
+ /*
+  * Control Group subsystem type.
+diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
+index fccf7f4..1d5925e 100644
+--- a/include/linux/cleancache.h
++++ b/include/linux/cleancache.h
+@@ -35,7 +35,7 @@ struct cleancache_ops {
+       void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
+       void (*invalidate_inode)(int, struct cleancache_filekey);
+       void (*invalidate_fs)(int);
+-};
++} __no_const;
+ extern int cleancache_register_ops(const struct cleancache_ops *ops);
+ extern void __cleancache_init_fs(struct super_block *);
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index a39c0c5..1518828 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -218,6 +218,7 @@ struct clk_ops {
+       void            (*init)(struct clk_hw *hw);
+       int             (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ };
++typedef struct clk_ops __no_const clk_ops_no_const;
+ /**
+  * struct clk_init_data - holds init data that's common to all clocks and is
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index f964ef7..0679632 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -47,14 +47,15 @@
+       COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+ #define COMPAT_SYSCALL_DEFINEx(x, name, ...)                          \
+-      asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
+-              __attribute__((alias(__stringify(compat_SyS##name))));  \
+       static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+-      asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
+-      asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
++      static inline asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
+       {                                                               \
+               return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+       }                                                               \
++      asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
++      {                                                               \
++              return compat_SyS##name(__MAP(x,__SC_ARGS,__VA_ARGS__));\
++      }                                                               \
+       static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+ #ifndef compat_user_stack_pointer
+@@ -318,7 +319,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+                          compat_size_t __user *len_ptr);
+ asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
++asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
+ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+               compat_ssize_t msgsz, int msgflg);
+@@ -327,7 +328,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ long compat_sys_msgctl(int first, int second, void __user *uptr);
+ long compat_sys_shmctl(int first, int second, void __user *uptr);
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+-              unsigned nsems, const struct compat_timespec __user *timeout);
++              compat_long_t nsems, const struct compat_timespec __user *timeout);
+ asmlinkage long compat_sys_keyctl(u32 option,
+                             u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+@@ -447,7 +448,7 @@ extern int compat_ptrace_request(struct task_struct *child,
+ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+                              compat_ulong_t addr, compat_ulong_t data);
+ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+-                                compat_long_t addr, compat_long_t data);
++                                compat_ulong_t addr, compat_ulong_t data);
+ asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+ /*
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 573c5a1..b902c3f 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -116,9 +116,9 @@
+  */
+ #define __pure                        __attribute__((pure))
+ #define __aligned(x)          __attribute__((aligned(x)))
+-#define __printf(a, b)                __attribute__((format(printf, a, b)))
+-#define __scanf(a, b)         __attribute__((format(scanf, a, b)))
+-#define __attribute_const__   __attribute__((__const__))
++#define __printf(a, b)                __attribute__((format(printf, a, b))) __nocapture(a, b)
++#define __scanf(a, b)         __attribute__((format(scanf, a, b))) __nocapture(a, b)
++#define __attribute_const__   __attribute__((const))
+ #define __maybe_unused                __attribute__((unused))
+ #define __always_unused               __attribute__((unused))
+@@ -185,9 +185,56 @@
+ # define __compiletime_warning(message) __attribute__((warning(message)))
+ # define __compiletime_error(message) __attribute__((error(message)))
+ #endif /* __CHECKER__ */
++
++#define __alloc_size(...)     __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg)               __builtin_object_size((ptr), (arg))
++#define __bos0(ptr)           __bos((ptr), 0)
++#define __bos1(ptr)           __bos((ptr), 1)
+ #endif /* GCC_VERSION >= 40300 */
+ #if GCC_VERSION >= 40500
++
++#ifdef RANDSTRUCT_PLUGIN
++#define __randomize_layout __attribute__((randomize_layout))
++#define __no_randomize_layout __attribute__((no_randomize_layout))
++#endif
++
++#ifdef CONSTIFY_PLUGIN
++#define __no_const __attribute__((no_const))
++#define __do_const __attribute__((do_const))
++#define const_cast(x) (*(typeof((typeof(x))0) *)&(x))
++#endif
++
++#ifdef SIZE_OVERFLOW_PLUGIN
++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
++#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
++#endif
++
++#ifndef __CHECKER__
++#ifdef LATENT_ENTROPY_PLUGIN
++#define __latent_entropy __attribute__((latent_entropy))
++#endif
++#endif
++
++#ifdef INITIFY_PLUGIN
++#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__)))
++#endif
++
++/*
++ * The initify gcc-plugin attempts to identify const arguments that are only
++ * used during init (see __init and __exit), so they can be moved to the
++ * .init.rodata/.exit.rodata section. If an argument is passed to a non-init
++ * function, it must normally be assumed that such an argument has been
++ * captured by that function and may be used in the future when .init/.exit has
++ * been unmapped from memory. In order to identify functions that are confirmed
++ * to not capture their arguments, the __nocapture() attribute is used so that
++ * initify can better identify candidate variables.
++ */
++#ifdef INITIFY_PLUGIN
++#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__)))
++#define __unverified_nocapture(...) __attribute__((unverified_nocapture(__VA_ARGS__)))
++#endif
++
+ /*
+  * Mark a position in code as unreachable.  This can be used to
+  * suppress control flow warnings after asm blocks that transfer
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 6685698..688714d 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -5,11 +5,14 @@
+ #ifdef __CHECKER__
+ # define __user               __attribute__((noderef, address_space(1)))
++# define __force_user __force __user
+ # define __kernel     __attribute__((address_space(0)))
++# define __force_kernel       __force __kernel
+ # define __safe               __attribute__((safe))
+ # define __force      __attribute__((force))
+ # define __nocast     __attribute__((nocast))
+ # define __iomem      __attribute__((noderef, address_space(2)))
++# define __force_iomem        __force __iomem
+ # define __must_hold(x)       __attribute__((context(x,1,1)))
+ # define __acquires(x)        __attribute__((context(x,0,1)))
+ # define __releases(x)        __attribute__((context(x,1,0)))
+@@ -17,33 +20,76 @@
+ # define __release(x) __context__(x,-1)
+ # define __cond_lock(x,c)     ((c) ? ({ __acquire(x); 1; }) : 0)
+ # define __percpu     __attribute__((noderef, address_space(3)))
++# define __force_percpu       __force __percpu
+ #ifdef CONFIG_SPARSE_RCU_POINTER
+ # define __rcu                __attribute__((noderef, address_space(4)))
++# define __force_rcu  __force __rcu
+ #else /* CONFIG_SPARSE_RCU_POINTER */
+ # define __rcu
++# define __force_rcu
+ #endif /* CONFIG_SPARSE_RCU_POINTER */
+ # define __private    __attribute__((noderef))
+ extern void __chk_user_ptr(const volatile void __user *);
+ extern void __chk_io_ptr(const volatile void __iomem *);
+ # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+ #else /* __CHECKER__ */
+-# define __user
+-# define __kernel
++# ifdef CHECKER_PLUGIN
++#  ifdef CHECKER_PLUGIN_USER
++//#  define __user
++//#  define __force_user
++//#  define __kernel
++//#  define __force_kernel
++#  else
++#  define __user
++#  define __force_user
++#  define __kernel
++#  define __force_kernel
++#  endif
++#  ifdef CHECKER_PLUGIN_CONTEXT
++#  define __must_hold(x)      __attribute__((context(#x,1,1)))
++#  define __acquires(x)       __attribute__((context(#x,0,1)))
++#  define __releases(x)       __attribute__((context(#x,1,0)))
++#  define __acquire(x)        __context__(#x,1)
++#  define __release(x)        __context__(#x,-1)
++#  define __cond_lock(x,c)    ((c) ? ({ __acquire(x); 1; }) : 0)
++#  define __cond_unlock(x,c)  ((c) ? ({ __release(x); 1; }) : 0)
++#  else
++#  define __must_hold(x)
++#  define __acquires(x)
++#  define __releases(x)
++#  define __acquire(x) (void)0
++#  define __release(x) (void)0
++#  define __cond_lock(x,c) (c)
++#  define __cond_unlock(x,c) (c)
++#  endif
++# else
++#  ifdef STRUCTLEAK_PLUGIN
++#   define __user __attribute__((user))
++#  else
++#   define __user
++#  endif
++#  define __force_user
++#  define __kernel
++#  define __force_kernel
++#  define __must_hold(x)
++#  define __acquires(x)
++#  define __releases(x)
++#  define __acquire(x) (void)0
++#  define __release(x) (void)0
++#  define __cond_lock(x,c) (c)
++# endif
+ # define __safe
+ # define __force
+ # define __nocast
+ # define __iomem
++# define __force_iomem
+ # define __chk_user_ptr(x) (void)0
+ # define __chk_io_ptr(x) (void)0
+ # define __builtin_warning(x, y...) (1)
+-# define __must_hold(x)
+-# define __acquires(x)
+-# define __releases(x)
+-# define __acquire(x) (void)0
+-# define __release(x) (void)0
+-# define __cond_lock(x,c) (c)
+ # define __percpu
++# define __force_percpu
+ # define __rcu
++# define __force_rcu
+ # define __private
+ # define ACCESS_PRIVATE(p, member) ((p)->member)
+ #endif /* __CHECKER__ */
+@@ -200,29 +246,20 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ #include <uapi/linux/types.h>
+-#define __READ_ONCE_SIZE                                              \
+-({                                                                    \
+-      switch (size) {                                                 \
+-      case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
+-      case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
+-      case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
+-      case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
+-      default:                                                        \
+-              barrier();                                              \
+-              __builtin_memcpy((void *)res, (const void *)p, size);   \
+-              barrier();                                              \
+-      }                                                               \
+-})
+-
+-static __always_inline
+-void __read_once_size(const volatile void *p, void *res, int size)
+-{
+-      __READ_ONCE_SIZE;
+-}
+-
+ #ifdef CONFIG_KASAN
+ /*
+- * This function is not 'inline' because __no_sanitize_address confilcts
++ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
++ * to hide memory access from KASAN.
++ */
++#define READ_ONCE_NOCHECK(x)                                  \
++({                                                            \
++      union { typeof(x) __val; char __c[sizeof(x)]; } __u;    \
++      __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
++      __u.__val;                                              \
++})
++
++/*
++ * This function is not 'inline' because __no_sanitize_address conflicts
+  * with inlining. Attempt to inline it may cause a build failure.
+  *    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+@@ -230,29 +267,20 @@ void __read_once_size(const volatile void *p, void *res, int size)
+ static __no_sanitize_address __maybe_unused
+ void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+ {
+-      __READ_ONCE_SIZE;
+-}
+-#else
+-static __always_inline
+-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+-{
+-      __READ_ONCE_SIZE;
+-}
+-#endif
+-
+-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+-{
+       switch (size) {
+-      case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+-      case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+-      case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+-      case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
++      case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
++      case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
++      case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
++      case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
+       default:
+               barrier();
+-              __builtin_memcpy((void *)p, (const void *)res, size);
++              __builtin_memcpy(res, (const void *)p, size);
+               barrier();
+       }
+ }
++#else
++#define READ_ONCE_NOCHECK(x) READ_ONCE(x)
++#endif
+ /*
+  * Prevent the compiler from merging or refetching reads or writes. The
+@@ -277,29 +305,15 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  * required ordering.
+  */
+-#define __READ_ONCE(x, check)                                         \
+-({                                                                    \
+-      union { typeof(x) __val; char __c[1]; } __u;                    \
+-      if (check)                                                      \
+-              __read_once_size(&(x), __u.__c, sizeof(x));             \
+-      else                                                            \
+-              __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
+-      __u.__val;                                                      \
++#define READ_ONCE(x) ({                                       \
++      typeof(x) __val = *(volatile typeof(x) *)&(x);  \
++      __val;                                          \
+ })
+-#define READ_ONCE(x) __READ_ONCE(x, 1)
+-/*
+- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+- * to hide memory access from KASAN.
+- */
+-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+-
+-#define WRITE_ONCE(x, val) \
+-({                                                    \
+-      union { typeof(x) __val; char __c[1]; } __u =   \
+-              { .__val = (__force typeof(x)) (val) }; \
+-      __write_once_size(&(x), __u.__c, sizeof(x));    \
+-      __u.__val;                                      \
++#define WRITE_ONCE(x, val) ({                         \
++      typeof(x) __val = (val);                        \
++      (x) = *(volatile typeof(x) *)&__val;            \
++      __val;                                          \
+ })
+ #endif /* __KERNEL__ */
+@@ -406,6 +420,50 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+ # define __attribute_const__  /* unimplemented */
+ #endif
++#ifndef __randomize_layout
++# define __randomize_layout
++#endif
++
++#ifndef __no_randomize_layout
++# define __no_randomize_layout
++#endif
++
++#ifndef __no_const
++# define __no_const
++#endif
++
++#ifndef __do_const
++# define __do_const
++#endif
++
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
++#ifndef __intentional_overflow
++# define __intentional_overflow(...)
++#endif
++
++#ifndef __latent_entropy
++# define __latent_entropy
++#endif
++
++#ifndef __nocapture
++# define __nocapture(...)
++#endif
++
++#ifndef const_cast
++# define const_cast(x)        (x)
++#endif
++
++#ifndef __nocapture
++# define __nocapture(...)
++#endif
++
++#ifndef __unverified_nocapture
++# define __unverified_nocapture(...)
++#endif
++
+ /*
+  * Tell gcc if a function is cold. The compiler will assume any path
+  * directly leading to the call is unlikely.
+@@ -415,6 +473,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+ #define __cold
+ #endif
++#ifndef __alloc_size
++#define __alloc_size(...)
++#endif
++
++#ifndef __bos
++#define __bos(ptr, arg)
++#endif
++
++#ifndef __bos0
++#define __bos0(ptr)
++#endif
++
++#ifndef __bos1
++#define __bos1(ptr)
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+@@ -437,6 +511,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+ # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+ #endif
++#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
++
+ /* Is this type a native word size -- useful for atomic operations */
+ #ifndef __native_word
+ # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+@@ -516,8 +592,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  */
+ #define __ACCESS_ONCE(x) ({ \
+        __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+-      (volatile typeof(x) *)&(x); })
++      (volatile const typeof(x) *)&(x); })
+ #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
+ /**
+  * lockless_dereference() - safely load a pointer for later dereference
+diff --git a/include/linux/configfs.h b/include/linux/configfs.h
+index d9d6a9d..489772c 100644
+--- a/include/linux/configfs.h
++++ b/include/linux/configfs.h
+@@ -136,7 +136,7 @@ struct configfs_attribute {
+       umode_t                 ca_mode;
+       ssize_t (*show)(struct config_item *, char *);
+       ssize_t (*store)(struct config_item *, const char *, size_t);
+-};
++} __do_const;
+ #define CONFIGFS_ATTR(_pfx, _name)                    \
+ static struct configfs_attribute _pfx##attr_##_name = {       \
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 32dc0cbd..6e18583 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -237,10 +237,11 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
+ struct global_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct kobject *kobj,
+-                      struct attribute *attr, char *buf);
+-      ssize_t (*store)(struct kobject *a, struct attribute *b,
++                      struct kobj_attribute *attr, char *buf);
++      ssize_t (*store)(struct kobject *a, struct kobj_attribute *b,
+                        const char *c, size_t count);
+ };
++typedef struct global_attr __no_const global_attr_no_const;
+ #define define_one_global_ro(_name)           \
+ static struct global_attr _name =             \
+@@ -323,7 +324,7 @@ struct cpufreq_driver {
+       /* platform specific boost support code */
+       bool            boost_enabled;
+       int             (*set_boost)(int state);
+-};
++} __do_const;
+ /* flags */
+ #define CPUFREQ_STICKY                (1 << 0)        /* driver isn't removed even if
+diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
+index bb31373..e85eb5f 100644
+--- a/include/linux/cpuidle.h
++++ b/include/linux/cpuidle.h
+@@ -59,7 +59,8 @@ struct cpuidle_state {
+       void (*enter_freeze) (struct cpuidle_device *dev,
+                             struct cpuidle_driver *drv,
+                             int index);
+-};
++} __do_const;
++typedef struct cpuidle_state __no_const cpuidle_state_no_const;
+ /* Idle State Flags */
+ #define CPUIDLE_FLAG_COUPLED  (0x02) /* state applies to multiple cpus */
+@@ -237,7 +238,7 @@ struct cpuidle_governor {
+       void (*reflect)         (struct cpuidle_device *dev, int index);
+       struct module           *owner;
+-};
++} __do_const;
+ #ifdef CONFIG_CPU_IDLE
+ extern int cpuidle_register_governor(struct cpuidle_governor *gov);
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index da7fbf1..c2a221b 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -131,17 +131,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
+ }
+ /* Valid inputs for n are -1 and 0. */
+-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
+ {
+       return n+1;
+ }
+-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
+ {
+       return n+1;
+ }
+-static inline unsigned int cpumask_next_and(int n,
++static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
+                                           const struct cpumask *srcp,
+                                           const struct cpumask *andp)
+ {
+@@ -185,7 +185,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
+  *
+  * Returns >= nr_cpu_ids if no further cpus set.
+  */
+-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
+ {
+       /* -1 is a legal arg here. */
+       if (n != -1)
+@@ -200,7 +200,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+  *
+  * Returns >= nr_cpu_ids if no further cpus unset.
+  */
+-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
+ {
+       /* -1 is a legal arg here. */
+       if (n != -1)
+@@ -208,7 +208,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+       return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ }
+-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
++int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
+ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+ unsigned int cpumask_local_spread(unsigned int i, int node);
+@@ -475,7 +475,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
+  * cpumask_weight - Count of bits in *srcp
+  * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+  */
+-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
+ {
+       return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+ }
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 257db64..a73cf86 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -35,7 +35,7 @@ struct group_info {
+       int             nblocks;
+       kgid_t          small_block[NGROUPS_SMALL];
+       kgid_t          *blocks[0];
+-};
++} __randomize_layout;
+ /**
+  * get_group_info - Get a reference to a group info structure
+@@ -153,7 +153,7 @@ struct cred {
+       struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+       struct group_info *group_info;  /* supplementary groups for euid/fsgid */
+       struct rcu_head rcu;            /* RCU deletion hook */
+-};
++} __randomize_layout;
+ extern void __put_cred(struct cred *);
+ extern void exit_creds(struct task_struct *);
+@@ -211,6 +211,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+ static inline void validate_process_creds(void)
+ {
+ }
++static inline void validate_task_creds(struct task_struct *task)
++{
++}
+ #endif
+ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+@@ -355,6 +358,7 @@ static inline void put_cred(const struct cred *_cred)
+ #define task_uid(task)                (task_cred_xxx((task), uid))
+ #define task_euid(task)               (task_cred_xxx((task), euid))
++#define task_securebits(task) (task_cred_xxx((task), securebits))
+ #define current_cred_xxx(xxx)                 \
+ ({                                            \
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index 7cee555..65ead50 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -510,7 +510,7 @@ struct cipher_tfm {
+                         const u8 *key, unsigned int keylen);
+       void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+       void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-};
++} __no_const;
+ struct compress_tfm {
+       int (*cot_compress)(struct crypto_tfm *tfm,
+@@ -519,7 +519,7 @@ struct compress_tfm {
+       int (*cot_decompress)(struct crypto_tfm *tfm,
+                             const u8 *src, unsigned int slen,
+                             u8 *dst, unsigned int *dlen);
+-};
++} __no_const;
+ #define crt_ablkcipher        crt_u.ablkcipher
+ #define crt_blkcipher crt_u.blkcipher
+diff --git a/include/linux/ctype.h b/include/linux/ctype.h
+index 653589e..4ef254a 100644
+--- a/include/linux/ctype.h
++++ b/include/linux/ctype.h
+@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
+  * Fast implementation of tolower() for internal usage. Do not use in your
+  * code.
+  */
+-static inline char _tolower(const char c)
++static inline unsigned char _tolower(const unsigned char c)
+ {
+       return c | 0x20;
+ }
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 5ff3e9a..fc6b872 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -102,6 +102,9 @@ struct dentry {
+               struct list_head d_lru;         /* LRU list */
+               wait_queue_head_t *d_wait;      /* in-lookup ones only */
+       };
++#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
++      atomic_t chroot_refcnt;         /* tracks use of directory in chroot */
++#endif
+       struct list_head d_child;       /* child of parent list */
+       struct list_head d_subdirs;     /* our children */
+       /*
+@@ -112,7 +115,7 @@ struct dentry {
+               struct hlist_bl_node d_in_lookup_hash;  /* only for in-lookup ones */
+               struct rcu_head d_rcu;
+       } d_u;
+-};
++} __randomize_layout;
+ /*
+  * dentry->d_lock spinlock nesting subclasses:
+@@ -279,7 +282,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
+ static inline unsigned d_count(const struct dentry *dentry)
+ {
+-      return dentry->d_lockref.count;
++      return __lockref_read(&dentry->d_lockref);
+ }
+ /*
+@@ -308,7 +311,7 @@ extern char *dentry_path(struct dentry *, char *, int);
+ static inline struct dentry *dget_dlock(struct dentry *dentry)
+ {
+       if (dentry)
+-              dentry->d_lockref.count++;
++              __lockref_inc(&dentry->d_lockref);
+       return dentry;
+ }
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index 4d3f0d1..7713e0a 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -139,6 +139,8 @@ struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
+                                    struct dentry *parent, size_t *value);
+ struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+                                    struct dentry *parent, atomic_t *value);
++struct dentry *debugfs_create_atomic_unchecked_t(const char *name, umode_t mode,
++                                   struct dentry *parent, atomic_unchecked_t *value);
+ struct dentry *debugfs_create_bool(const char *name, umode_t mode,
+                                 struct dentry *parent, bool *value);
+@@ -234,7 +236,7 @@ static inline void debugfs_use_file_finish(int srcu_idx)
+ { }
+ #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+-      static const struct file_operations __fops = { 0 }
++      static const struct file_operations __fops = { }
+ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+                 struct dentry *new_dir, char *new_name)
+@@ -311,6 +313,12 @@ static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t m
+       return ERR_PTR(-ENODEV);
+ }
++static inline struct dentry *debugfs_create_atomic_unchecked_t(const char *name, umode_t mode,
++                                   struct dentry *parent, atomic_unchecked_t *value)
++{
++      return ERR_PTR(-ENODEV);
++}
++
+ static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
+                                                struct dentry *parent,
+                                                bool *value)
+diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
+index 7925bf0..d5143d2 100644
+--- a/include/linux/decompress/mm.h
++++ b/include/linux/decompress/mm.h
+@@ -77,7 +77,7 @@ static void free(void *where)
+  * warnings when not needed (indeed large_malloc / large_free are not
+  * needed by inflate */
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #define large_malloc(a) vmalloc(a)
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index 2de4e2e..510a09b8 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -124,7 +124,7 @@ struct devfreq_governor {
+       int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+       int (*event_handler)(struct devfreq *devfreq,
+                               unsigned int event, void *data);
+-};
++} __do_const;
+ /**
+  * struct devfreq - Device devfreq structure
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 38f0281..72e7b70 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -346,7 +346,7 @@ struct subsys_interface {
+       struct list_head node;
+       int (*add_dev)(struct device *dev, struct subsys_interface *sif);
+       void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+-};
++} __do_const;
+ int subsys_interface_register(struct subsys_interface *sif);
+ void subsys_interface_unregister(struct subsys_interface *sif);
+@@ -542,7 +542,7 @@ struct device_type {
+       void (*release)(struct device *dev);
+       const struct dev_pm_ops *pm;
+-};
++} __do_const;
+ /* interface for exporting device attributes */
+ struct device_attribute {
+@@ -552,11 +552,12 @@ struct device_attribute {
+       ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count);
+ };
++typedef struct device_attribute __no_const device_attribute_no_const;
+ struct dev_ext_attribute {
+       struct device_attribute attr;
+       void *var;
+-};
++} __do_const;
+ ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
+                         char *buf);
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index dc69df0..d8db6b8 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -114,7 +114,7 @@ struct dma_map_ops {
+       u64 (*get_required_mask)(struct device *dev);
+ #endif
+       int is_phys;
+-};
++} __do_const;
+ extern struct dma_map_ops dma_noop_ops;
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 0148a30..6f9e494 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1134,6 +1134,7 @@ struct efivar_operations {
+       efi_set_variable_t *set_variable_nonblocking;
+       efi_query_variable_store_t *query_variable_store;
+ };
++typedef struct efivar_operations __no_const efivar_operations_no_const;
+ struct efivars {
+       /*
+diff --git a/include/linux/elf.h b/include/linux/elf.h
+index 20fa8d8..3d0dd18 100644
+--- a/include/linux/elf.h
++++ b/include/linux/elf.h
+@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elf_note      elf32_note
+ #define elf_addr_t    Elf32_Off
+ #define Elf_Half      Elf32_Half
++#define elf_dyn               Elf32_Dyn
+ #else
+@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elf_note      elf64_note
+ #define elf_addr_t    Elf64_Off
+ #define Elf_Half      Elf64_Half
++#define elf_dyn               Elf64_Dyn
+ #endif
+diff --git a/include/linux/err.h b/include/linux/err.h
+index 1e35588..ce9721b 100644
+--- a/include/linux/err.h
++++ b/include/linux/err.h
+@@ -20,12 +20,12 @@
+ #define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+-static inline void * __must_check ERR_PTR(long error)
++static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
+ {
+       return (void *) error;
+ }
+-static inline long __must_check PTR_ERR(__force const void *ptr)
++static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
+ {
+       return (long) ptr;
+ }
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 9ded8c6..e11a2457 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -373,4 +373,5 @@ struct ethtool_ops {
+       int     (*set_link_ksettings)(struct net_device *,
+                                     const struct ethtool_link_ksettings *);
+ };
++typedef struct ethtool_ops __no_const ethtool_ops_no_const;
+ #endif /* _LINUX_ETHTOOL_H */
+diff --git a/include/linux/extcon.h b/include/linux/extcon.h
+index 6100441..15b9e72 100644
+--- a/include/linux/extcon.h
++++ b/include/linux/extcon.h
+@@ -123,7 +123,7 @@ struct extcon_dev {
+       /* /sys/class/extcon/.../mutually_exclusive/... */
+       struct attribute_group attr_g_muex;
+       struct attribute **attrs_muex;
+-      struct device_attribute *d_attrs_muex;
++      device_attribute_no_const *d_attrs_muex;
+ };
+ #if IS_ENABLED(CONFIG_EXTCON)
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index a964d07..09bf71f 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -320,7 +320,8 @@ struct fb_ops {
+       /* called at KDB enter and leave time to prepare the console */
+       int (*fb_debug_enter)(struct fb_info *info);
+       int (*fb_debug_leave)(struct fb_info *info);
+-};
++} __do_const;
++typedef struct fb_ops __no_const fb_ops_no_const;
+ #ifdef CONFIG_FB_TILEBLITTING
+ #define FB_TILE_CURSOR_NONE        0
+diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
+index 5295535..9852c7e 100644
+--- a/include/linux/fdtable.h
++++ b/include/linux/fdtable.h
+@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
+ void put_files_struct(struct files_struct *fs);
+ void reset_files_struct(struct files_struct *);
+ int unshare_files(struct files_struct **);
+-struct files_struct *dup_fd(struct files_struct *, int *);
++struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
+ void do_close_on_exec(struct files_struct *);
+ int iterate_fd(struct files_struct *, unsigned,
+               int (*)(const void *, struct file *, unsigned),
+diff --git a/include/linux/firewire.h b/include/linux/firewire.h
+index d4b7683..9feb066 100644
+--- a/include/linux/firewire.h
++++ b/include/linux/firewire.h
+@@ -451,7 +451,7 @@ struct fw_iso_context {
+ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+               int type, int channel, int speed, size_t header_size,
+-              fw_iso_callback_t callback, void *callback_data);
++              void *callback, void *callback_data);
+ int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
+ int fw_iso_context_queue(struct fw_iso_context *ctx,
+                        struct fw_iso_packet *packet,
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 7c39136..69c438a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -328,7 +328,7 @@ struct kiocb {
+       void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+       void                    *private;
+       int                     ki_flags;
+-};
++} __randomize_layout;
+ static inline bool is_sync_kiocb(struct kiocb *kiocb)
+ {
+@@ -444,7 +444,7 @@ struct address_space {
+       spinlock_t              private_lock;   /* for use by the address_space */
+       struct list_head        private_list;   /* ditto */
+       void                    *private_data;  /* ditto */
+-} __attribute__((aligned(sizeof(long))));
++} __attribute__((aligned(sizeof(long)))) __randomize_layout;
+       /*
+        * On most architectures that alignment is already the case; but
+        * must be enforced here for CRIS, to let the least significant bit
+@@ -486,7 +486,7 @@ struct block_device {
+       int                     bd_fsfreeze_count;
+       /* Mutex for freeze */
+       struct mutex            bd_fsfreeze_mutex;
+-};
++} __randomize_layout;
+ /*
+  * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
+@@ -700,7 +700,7 @@ struct inode {
+ #endif
+       void                    *i_private; /* fs or device private pointer */
+-};
++} __randomize_layout;
+ static inline int inode_unhashed(struct inode *inode)
+ {
+@@ -910,7 +910,7 @@ struct file {
+       struct list_head        f_tfile_llink;
+ #endif /* #ifdef CONFIG_EPOLL */
+       struct address_space    *f_mapping;
+-} __attribute__((aligned(4)));        /* lest something weird decides that 2 is OK */
++} __attribute__((aligned(4))) __randomize_layout;     /* lest something weird decides that 2 is OK */
+ struct file_handle {
+       __u32 handle_bytes;
+@@ -1045,7 +1045,7 @@ struct file_lock {
+                       int state;              /* state of grant or error if -ve */
+               } afs;
+       } fl_u;
+-};
++} __randomize_layout;
+ struct file_lock_context {
+       spinlock_t              flc_lock;
+@@ -1432,7 +1432,7 @@ struct super_block {
+       spinlock_t              s_inode_wblist_lock;
+       struct list_head        s_inodes_wb;    /* writeback inodes */
+-};
++} __randomize_layout;
+ /* Helper functions so that in most cases filesystems will
+  * not need to deal directly with kuid_t and kgid_t and can
+@@ -1716,7 +1716,8 @@ struct file_operations {
+                       u64);
+       ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
+                       u64);
+-};
++} __do_const __randomize_layout;
++typedef struct file_operations __no_const file_operations_no_const;
+ struct inode_operations {
+       struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
+@@ -2440,12 +2441,12 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ #define CHRDEV_MAJOR_HASH_SIZE        255
+ /* Marks the bottom of the first segment of free char majors */
+ #define CHRDEV_MAJOR_DYN_END 234
+-extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
++extern __nocapture(4) int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
+ extern int register_chrdev_region(dev_t, unsigned, const char *);
+ extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+                            unsigned int count, const char *name,
+                            const struct file_operations *fops);
+-extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
++extern __nocapture(4) void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+                               unsigned int count, const char *name);
+ extern void unregister_chrdev_region(dev_t, unsigned);
+ extern void chrdev_show(struct seq_file *,off_t);
+@@ -3193,4 +3194,14 @@ static inline bool dir_relax_shared(struct inode *inode)
+ extern bool path_noexec(const struct path *path);
+ extern void inode_nohighmem(struct inode *inode);
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
++      umode_t mode = inode->i_mode;
++      return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++#else
++      return false;
++#endif
++}
++
+ #endif /* _LINUX_FS_H */
+diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
+index 0efc3e6..fd23610 100644
+--- a/include/linux/fs_struct.h
++++ b/include/linux/fs_struct.h
+@@ -6,13 +6,13 @@
+ #include <linux/seqlock.h>
+ struct fs_struct {
+-      int users;
++      atomic_t users;
+       spinlock_t lock;
+       seqcount_t seq;
+       int umask;
+       int in_exec;
+       struct path root, pwd;
+-};
++} __randomize_layout;
+ extern struct kmem_cache *fs_cachep;
+diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
+index 13ba552..c4db760 100644
+--- a/include/linux/fscache-cache.h
++++ b/include/linux/fscache-cache.h
+@@ -117,7 +117,7 @@ struct fscache_operation {
+       fscache_operation_release_t release;
+ };
+-extern atomic_t fscache_op_debug_id;
++extern atomic_unchecked_t fscache_op_debug_id;
+ extern void fscache_op_work_func(struct work_struct *work);
+ extern void fscache_enqueue_operation(struct fscache_operation *);
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index 115bb81..e7b812b 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -152,7 +152,7 @@ struct fscache_cookie_def {
+        * - this is mandatory for any object that may have data
+        */
+       void (*now_uncached)(void *cookie_netfs_data);
+-};
++} __do_const;
+ /*
+  * fscache cached network filesystem type
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index eed9e85..21238db 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -176,6 +176,9 @@ static inline void fsnotify_access(struct file *file)
+       struct inode *inode = file_inode(file);
+       __u32 mask = FS_ACCESS;
++      if (is_sidechannel_device(inode))
++              return;
++
+       if (S_ISDIR(inode->i_mode))
+               mask |= FS_ISDIR;
+@@ -194,6 +197,9 @@ static inline void fsnotify_modify(struct file *file)
+       struct inode *inode = file_inode(file);
+       __u32 mask = FS_MODIFY;
++      if (is_sidechannel_device(inode))
++              return;
++
+       if (S_ISDIR(inode->i_mode))
+               mask |= FS_ISDIR;
+@@ -296,7 +302,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+  */
+ static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+ {
+-      return kstrdup(name, GFP_KERNEL);
++      return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
+ }
+ /*
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 1dbf52f..b698a75 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -208,7 +208,7 @@ struct gendisk {
+       struct kobject *slave_dir;
+       struct timer_rand_state *random;
+-      atomic_t sync_io;               /* RAID */
++      atomic_unchecked_t sync_io;     /* RAID */
+       struct disk_events *ev;
+ #ifdef  CONFIG_BLK_DEV_INTEGRITY
+       struct kobject integrity_kobj;
+@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
+ extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
+ /* drivers/char/random.c */
+-extern void add_disk_randomness(struct gendisk *disk);
++extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+ extern void rand_initialize_disk(struct gendisk *disk);
+ static inline sector_t get_start_sect(struct block_device *bdev)
+diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
+index 667c311..abac2a7 100644
+--- a/include/linux/genl_magic_func.h
++++ b/include/linux/genl_magic_func.h
+@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+ },
+ #define ZZZ_genl_ops          CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
++static struct genl_ops ZZZ_genl_ops[] = {
+ #include GENL_MAGIC_INCLUDE_FILE
+ };
+diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
+index 6270a56..ddda3ac 100644
+--- a/include/linux/genl_magic_struct.h
++++ b/include/linux/genl_magic_struct.h
+@@ -81,8 +81,8 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
+       __field(attr_nr, attr_flag, name, NLA_U32, __u32, \
+                       nla_get_u32, nla_put_u32, false)
+ #define __s32_field(attr_nr, attr_flag, name) \
+-      __field(attr_nr, attr_flag, name, NLA_U32, __s32, \
+-                      nla_get_u32, nla_put_u32, true)
++      __field(attr_nr, attr_flag, name, NLA_S32, __s32, \
++                      nla_get_s32, nla_put_s32, true)
+ #define __u64_field(attr_nr, attr_flag, name) \
+       __field(attr_nr, attr_flag, name, NLA_U64, __u64, \
+                       nla_get_u64, nla_put_u64_0pad, false)
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index f8041f9de..593a07b 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -41,6 +41,13 @@ struct vm_area_struct;
+ #define ___GFP_OTHER_NODE     0x800000u
+ #define ___GFP_WRITE          0x1000000u
+ #define ___GFP_KSWAPD_RECLAIM 0x2000000u
++
++#ifdef CONFIG_PAX_USERCOPY
++#define ___GFP_USERCOPY               0x4000000u
++#else
++#define ___GFP_USERCOPY               0
++#endif
++
+ /* If the above are modified, __GFP_BITS_SHIFT may need updating */
+ /*
+@@ -79,12 +86,15 @@ struct vm_area_struct;
+  *   node with no fallbacks or placement policy enforcements.
+  *
+  * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
++ *
++ * __GFP_USERCOPY indicates that the page will be copied to/from userland
+  */
+ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+ #define __GFP_WRITE   ((__force gfp_t)___GFP_WRITE)
+ #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL)
+ #define __GFP_THISNODE        ((__force gfp_t)___GFP_THISNODE)
+ #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
++#define __GFP_USERCOPY        ((__force gfp_t)___GFP_USERCOPY)
+ /*
+  * Watermark modifiers -- controls access to emergency reserves
+@@ -187,7 +197,7 @@ struct vm_area_struct;
+ #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
+ /* Room for N __GFP_FOO bits */
+-#define __GFP_BITS_SHIFT 26
++#define __GFP_BITS_SHIFT 27
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+ /*
+@@ -260,6 +270,8 @@ struct vm_area_struct;
+                        __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+ #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
++#define GFP_USERCOPY  __GFP_USERCOPY
++
+ /* Convert GFP flags to their corresponding migrate type */
+ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+ #define GFP_MOVABLE_SHIFT 3
+@@ -516,7 +528,7 @@ extern void __free_page_frag(void *addr);
+ void page_alloc_init(void);
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+ void drain_all_pages(struct zone *zone);
+-void drain_local_pages(struct zone *zone);
++void drain_local_pages(void *zone);
+ void page_alloc_init_late(void);
+diff --git a/include/linux/gracl.h b/include/linux/gracl.h
+new file mode 100644
+index 0000000..a3c4df7
+--- /dev/null
++++ b/include/linux/gracl.h
+@@ -0,0 +1,342 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/capability.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* Major status information */
++
++#define GR_VERSION  "grsecurity 3.1"
++#define GRSECURITY_VERSION 0x3100
++
++enum {
++      GR_SHUTDOWN = 0,
++      GR_ENABLE = 1,
++      GR_SPROLE = 2,
++      GR_OLDRELOAD = 3,
++      GR_SEGVMOD = 4,
++      GR_STATUS = 5,
++      GR_UNSPROLE = 6,
++      GR_PASSSET = 7,
++      GR_SPROLEPAM = 8,
++      GR_RELOAD = 9,
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++      GR_PW_LEN = 128,
++      GR_SALT_LEN = 16,
++      GR_SHA_LEN = 32,
++};
++
++enum {
++      GR_SPROLE_LEN = 64,
++};
++
++enum {
++      GR_NO_GLOB = 0,
++      GR_REG_GLOB,
++      GR_CREATE_GLOB
++};
++
++#define GR_NLIMITS 32
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++      unsigned char *rolename;
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];  /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++      __u32 key;
++      u64 inode;
++      dev_t device;
++      char *name;
++      __u16 len;
++      __u8 deleted;
++      struct name_entry *prev;
++      struct name_entry *next;
++};
++
++struct inodev_entry {
++      struct name_entry *nentry;
++      struct inodev_entry *prev;
++      struct inodev_entry *next;
++};
++
++struct acl_role_db {
++      struct acl_role_label **r_hash;
++      __u32 r_size;
++};
++
++struct inodev_db {
++      struct inodev_entry **i_hash;
++      __u32 i_size;
++};
++
++struct name_db {
++      struct name_entry **n_hash;
++      __u32 n_size;
++};
++
++struct crash_uid {
++      uid_t uid;
++      unsigned long expires;
++};
++
++struct gr_hash_struct {
++      void **table;
++      void **nametable;
++      void *first;
++      __u32 table_size;
++      __u32 used_size;
++      int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++
++struct acl_subject_label {
++      char *filename;
++      u64 inode;
++      dev_t device;
++      __u32 mode;
++      kernel_cap_t cap_mask;
++      kernel_cap_t cap_lower;
++      kernel_cap_t cap_invert_audit;
++
++      struct rlimit res[GR_NLIMITS];
++      __u32 resmask;
++
++      __u8 user_trans_type;
++      __u8 group_trans_type;
++      uid_t *user_transitions;
++      gid_t *group_transitions;
++      __u16 user_trans_num;
++      __u16 group_trans_num;
++
++      __u32 sock_families[2];
++      __u32 ip_proto[8];
++      __u32 ip_type;
++      struct acl_ip_label **ips;
++      __u32 ip_num;
++      __u32 inaddr_any_override;
++
++      __u32 crashes;
++      unsigned long expires;
++
++      struct acl_subject_label *parent_subject;
++      struct gr_hash_struct *hash;
++      struct acl_subject_label *prev;
++      struct acl_subject_label *next;
++
++      struct acl_object_label **obj_hash;
++      __u32 obj_hash_size;
++      __u16 pax_flags;
++};
++
++struct role_allowed_ip {
++      __u32 addr;
++      __u32 netmask;
++
++      struct role_allowed_ip *prev;
++      struct role_allowed_ip *next;
++};
++
++struct role_transition {
++      char *rolename;
++
++      struct role_transition *prev;
++      struct role_transition *next;
++};
++
++struct acl_role_label {
++      char *rolename;
++      uid_t uidgid;
++      __u16 roletype;
++
++      __u16 auth_attempts;
++      unsigned long expires;
++
++      struct acl_subject_label *root_label;
++      struct gr_hash_struct *hash;
++
++      struct acl_role_label *prev;
++      struct acl_role_label *next;
++
++      struct role_transition *transitions;
++      struct role_allowed_ip *allowed_ips;
++      uid_t *domain_children;
++      __u16 domain_child_num;
++
++      umode_t umask;
++
++      struct acl_subject_label **subj_hash;
++      __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++      struct acl_role_label **r_table;
++      __u32 num_pointers;             /* Number of allocations to track */
++      __u32 num_roles;                /* Number of roles */
++      __u32 num_domain_children;      /* Number of domain children */
++      __u32 num_subjects;             /* Number of subjects */
++      __u32 num_objects;              /* Number of objects */
++};
++
++struct acl_object_label {
++      char *filename;
++      u64 inode;
++      dev_t device;
++      __u32 mode;
++
++      struct acl_subject_label *nested;
++      struct acl_object_label *globbed;
++
++      /* next two structures not used */
++
++      struct acl_object_label *prev;
++      struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++      char *iface;
++      __u32 addr;
++      __u32 netmask;
++      __u16 low, high;
++      __u8 mode;
++      __u32 type;
++      __u32 proto[8];
++
++      /* next two structures not used */
++
++      struct acl_ip_label *prev;
++      struct acl_ip_label *next;
++};
++
++struct gr_arg {
++      struct user_acl_role_db role_db;
++      unsigned char pw[GR_PW_LEN];
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];
++      unsigned char sp_role[GR_SPROLE_LEN];
++      struct sprole_pw *sprole_pws;
++      dev_t segv_device;
++      u64 segv_inode;
++      uid_t segv_uid;
++      __u16 num_sprole_pws;
++      __u16 mode;
++};
++
++struct gr_arg_wrapper {
++      struct gr_arg *arg;
++      __u32 version;
++      __u32 size;
++};
++
++struct subject_map {
++      struct acl_subject_label *user;
++      struct acl_subject_label *kernel;
++      struct subject_map *prev;
++      struct subject_map *next;
++};
++
++struct acl_subj_map_db {
++      struct subject_map **s_hash;
++      __u32 s_size;
++};
++
++struct gr_policy_state {
++      struct sprole_pw **acl_special_roles;
++      __u16 num_sprole_pws;
++      struct acl_role_label *kernel_role;
++      struct acl_role_label *role_list;
++      struct acl_role_label *default_role;
++      struct acl_role_db acl_role_set;
++      struct acl_subj_map_db subj_map_set;
++      struct name_db name_set;
++      struct inodev_db inodev_set;
++};
++
++struct gr_alloc_state {
++      unsigned long alloc_stack_next;
++      unsigned long alloc_stack_size;
++      void **alloc_stack;
++};
++
++struct gr_reload_state {
++      struct gr_policy_state oldpolicy;
++      struct gr_alloc_state oldalloc;
++      struct gr_policy_state newpolicy;
++      struct gr_alloc_state newalloc;
++      struct gr_policy_state *oldpolicy_ptr;
++      struct gr_alloc_state *oldalloc_ptr;
++      unsigned char oldmode;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler
++   Makes good use of the low bits of the inode.  Generally 0-1 times
++   in loop for successful match.  0-3 for unsuccessful match.
++   Shift/add algorithm with modulus of table size and an XOR*/
++
++static __inline__ unsigned int
++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++{
++      return ((((uid + type) << (16 + type)) ^ uid) % sz);
++}
++
++ static __inline__ unsigned int
++gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
++{
++      return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned int
++gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
++{
++      unsigned int rem;
++      div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
++      return rem;
++}
++
++static __inline__ unsigned int
++gr_nhash(const char *name, const __u16 len, const unsigned int sz)
++{
++      return full_name_hash(NULL, (const unsigned char *)name, len) % sz;
++}
++
++#define FOR_EACH_SUBJECT_START(role,subj,iter) \
++      subj = NULL; \
++      iter = 0; \
++      while (iter < role->subj_hash_size) { \
++              if (subj == NULL) \
++                      subj = role->subj_hash[iter]; \
++              if (subj == NULL) { \
++                      iter++; \
++                      continue; \
++              }
++
++#define FOR_EACH_SUBJECT_END(subj,iter) \
++              subj = subj->next; \
++              if (subj == NULL) \
++                      iter++; \
++      }
++
++
++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
++      subj = role->hash->first; \
++      while (subj != NULL) {
++
++#define FOR_EACH_NESTED_SUBJECT_END(subj) \
++              subj = subj->next; \
++      }
++
++#endif
++
+diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
+new file mode 100644
+index 0000000..af64092
+--- /dev/null
++++ b/include/linux/gracl_compat.h
+@@ -0,0 +1,156 @@
++#ifndef GR_ACL_COMPAT_H
++#define GR_ACL_COMPAT_H
++
++#include <linux/resource.h>
++#include <asm/resource.h>
++
++struct sprole_pw_compat {
++      compat_uptr_t rolename;
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];
++};
++
++struct gr_hash_struct_compat {
++      compat_uptr_t table;
++      compat_uptr_t nametable;
++      compat_uptr_t first;
++      __u32 table_size;
++      __u32 used_size;
++      int type;
++};
++
++struct acl_subject_label_compat {
++      compat_uptr_t filename;
++      compat_u64 inode;
++      __u32 device;
++      __u32 mode;
++      kernel_cap_t cap_mask;
++      kernel_cap_t cap_lower;
++      kernel_cap_t cap_invert_audit;
++
++      struct compat_rlimit res[GR_NLIMITS];
++      __u32 resmask;
++
++      __u8 user_trans_type;
++      __u8 group_trans_type;
++      compat_uptr_t user_transitions;
++      compat_uptr_t group_transitions;
++      __u16 user_trans_num;
++      __u16 group_trans_num;
++
++      __u32 sock_families[2];
++      __u32 ip_proto[8];
++      __u32 ip_type;
++      compat_uptr_t ips;
++      __u32 ip_num;
++      __u32 inaddr_any_override;
++
++      __u32 crashes;
++      compat_ulong_t expires;
++
++      compat_uptr_t parent_subject;
++      compat_uptr_t hash;
++      compat_uptr_t prev;
++      compat_uptr_t next;
++
++      compat_uptr_t obj_hash;
++      __u32 obj_hash_size;
++      __u16 pax_flags;
++};
++
++struct role_allowed_ip_compat {
++      __u32 addr;
++      __u32 netmask;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct role_transition_compat {
++      compat_uptr_t rolename;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct acl_role_label_compat {
++      compat_uptr_t rolename;
++      uid_t uidgid;
++      __u16 roletype;
++
++      __u16 auth_attempts;
++      compat_ulong_t expires;
++
++      compat_uptr_t root_label;
++      compat_uptr_t hash;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++
++      compat_uptr_t transitions;
++      compat_uptr_t allowed_ips;
++      compat_uptr_t domain_children;
++      __u16 domain_child_num;
++
++      umode_t umask;
++
++      compat_uptr_t subj_hash;
++      __u32 subj_hash_size;
++};
++
++struct user_acl_role_db_compat {
++      compat_uptr_t r_table;
++      __u32 num_pointers;
++      __u32 num_roles;
++      __u32 num_domain_children;
++      __u32 num_subjects;
++      __u32 num_objects;
++};
++
++struct acl_object_label_compat {
++      compat_uptr_t filename;
++      compat_u64 inode;
++      __u32 device;
++      __u32 mode;
++
++      compat_uptr_t nested;
++      compat_uptr_t globbed;
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct acl_ip_label_compat {
++      compat_uptr_t iface;
++      __u32 addr;
++      __u32 netmask;
++      __u16 low, high;
++      __u8 mode;
++      __u32 type;
++      __u32 proto[8];
++
++      compat_uptr_t prev;
++      compat_uptr_t next;
++};
++
++struct gr_arg_compat {
++      struct user_acl_role_db_compat role_db;
++      unsigned char pw[GR_PW_LEN];
++      unsigned char salt[GR_SALT_LEN];
++      unsigned char sum[GR_SHA_LEN];
++      unsigned char sp_role[GR_SPROLE_LEN];
++      compat_uptr_t sprole_pws;
++      __u32 segv_device;
++      compat_u64 segv_inode;
++      uid_t segv_uid;
++      __u16 num_sprole_pws;
++      __u16 mode;
++};
++
++struct gr_arg_wrapper_compat {
++      compat_uptr_t arg;
++      __u32 version;
++      __u32 size;
++};
++
++#endif
+diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
+new file mode 100644
+index 0000000..323ecf2
+--- /dev/null
++++ b/include/linux/gralloc.h
+@@ -0,0 +1,9 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++void *acl_alloc_num(unsigned long num, unsigned long len);
++
++#endif
+diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
+new file mode 100644
+index 0000000..be66033
+--- /dev/null
++++ b/include/linux/grdefs.h
+@@ -0,0 +1,140 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++      GR_READY = 0x01,
++      GR_STATUS_INIT = 0x00   // disabled state
++};
++
++/* Begin  ACL declarations */
++
++/* Role flags */
++
++enum {
++      GR_ROLE_USER = 0x0001,
++      GR_ROLE_GROUP = 0x0002,
++      GR_ROLE_DEFAULT = 0x0004,
++      GR_ROLE_SPECIAL = 0x0008,
++      GR_ROLE_AUTH = 0x0010,
++      GR_ROLE_NOPW = 0x0020,
++      GR_ROLE_GOD = 0x0040,
++      GR_ROLE_LEARN = 0x0080,
++      GR_ROLE_TPE = 0x0100,
++      GR_ROLE_DOMAIN = 0x0200,
++      GR_ROLE_PAM = 0x0400,
++      GR_ROLE_PERSIST = 0x0800
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++      GR_DELETED = 0x80000000
++};
++
++/* ACL Object-only mode flags */
++enum {
++      GR_READ         = 0x00000001,
++      GR_APPEND       = 0x00000002,
++      GR_WRITE        = 0x00000004,
++      GR_EXEC         = 0x00000008,
++      GR_FIND         = 0x00000010,
++      GR_INHERIT      = 0x00000020,
++      GR_SETID        = 0x00000040,
++      GR_CREATE       = 0x00000080,
++      GR_DELETE       = 0x00000100,
++      GR_LINK         = 0x00000200,
++      GR_AUDIT_READ   = 0x00000400,
++      GR_AUDIT_APPEND = 0x00000800,
++      GR_AUDIT_WRITE  = 0x00001000,
++      GR_AUDIT_EXEC   = 0x00002000,
++      GR_AUDIT_FIND   = 0x00004000,
++      GR_AUDIT_INHERIT= 0x00008000,
++      GR_AUDIT_SETID  = 0x00010000,
++      GR_AUDIT_CREATE = 0x00020000,
++      GR_AUDIT_DELETE = 0x00040000,
++      GR_AUDIT_LINK   = 0x00080000,
++      GR_PTRACERD     = 0x00100000,
++      GR_NOPTRACE     = 0x00200000,
++      GR_SUPPRESS     = 0x00400000,
++      GR_NOLEARN      = 0x00800000,
++      GR_INIT_TRANSFER= 0x01000000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++                 GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++                 GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
++
++/* ACL subject-only mode flags */
++enum {
++      GR_KILL         = 0x00000001,
++      GR_VIEW         = 0x00000002,
++      GR_PROTECTED    = 0x00000004,
++      GR_LEARN        = 0x00000008,
++      GR_OVERRIDE     = 0x00000010,
++      /* just a placeholder, this mode is only used in userspace */
++      GR_DUMMY        = 0x00000020,
++      GR_PROTSHM      = 0x00000040,
++      GR_KILLPROC     = 0x00000080,
++      GR_KILLIPPROC   = 0x00000100,
++      /* just a placeholder, this mode is only used in userspace */
++      GR_NOTROJAN     = 0x00000200,
++      GR_PROTPROCFD   = 0x00000400,
++      GR_PROCACCT     = 0x00000800,
++      GR_RELAXPTRACE  = 0x00001000,
++      //GR_NESTED     = 0x00002000,
++      GR_INHERITLEARN = 0x00004000,
++      GR_PROCFIND     = 0x00008000,
++      GR_POVERRIDE    = 0x00010000,
++      GR_KERNELAUTH   = 0x00020000,
++      GR_ATSECURE     = 0x00040000,
++      GR_SHMEXEC      = 0x00080000
++};
++
++enum {
++      GR_PAX_ENABLE_SEGMEXEC  = 0x0001,
++      GR_PAX_ENABLE_PAGEEXEC  = 0x0002,
++      GR_PAX_ENABLE_MPROTECT  = 0x0004,
++      GR_PAX_ENABLE_RANDMMAP  = 0x0008,
++      GR_PAX_ENABLE_EMUTRAMP  = 0x0010,
++      GR_PAX_DISABLE_SEGMEXEC = 0x0100,
++      GR_PAX_DISABLE_PAGEEXEC = 0x0200,
++      GR_PAX_DISABLE_MPROTECT = 0x0400,
++      GR_PAX_DISABLE_RANDMMAP = 0x0800,
++      GR_PAX_DISABLE_EMUTRAMP = 0x1000,
++};
++
++enum {
++      GR_ID_USER      = 0x01,
++      GR_ID_GROUP     = 0x02,
++};
++
++enum {
++      GR_ID_ALLOW     = 0x01,
++      GR_ID_DENY      = 0x02,
++};
++
++#define GR_CRASH_RES  31
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++      GR_RLIM_CPU_BUMP = 60,
++      GR_RLIM_FSIZE_BUMP = 50000,
++      GR_RLIM_DATA_BUMP = 10000,
++      GR_RLIM_STACK_BUMP = 1000,
++      GR_RLIM_CORE_BUMP = 10000,
++      GR_RLIM_RSS_BUMP = 500000,
++      GR_RLIM_NPROC_BUMP = 1,
++      GR_RLIM_NOFILE_BUMP = 5,
++      GR_RLIM_MEMLOCK_BUMP = 50000,
++      GR_RLIM_AS_BUMP = 500000,
++      GR_RLIM_LOCKS_BUMP = 2,
++      GR_RLIM_SIGPENDING_BUMP = 5,
++      GR_RLIM_MSGQUEUE_BUMP = 10000,
++      GR_RLIM_NICE_BUMP = 1,
++      GR_RLIM_RTPRIO_BUMP = 1,
++      GR_RLIM_RTTIME_BUMP = 1000000
++};
++
++#endif
+diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
+new file mode 100644
+index 0000000..1dbf9c8
+--- /dev/null
++++ b/include/linux/grinternal.h
+@@ -0,0 +1,231 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/mnt_namespace.h>
++#include <linux/nsproxy.h>
++#include <linux/gracl.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++void gr_add_learn_entry(const char *fmt, ...)
++      __attribute__ ((format (printf, 1, 2)));
++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++                          const struct vfsmount *mnt);
++__u32 gr_check_create(const struct dentry *new_dentry,
++                           const struct dentry *parent,
++                           const struct vfsmount *mnt, const __u32 mode);
++int gr_check_protected_task(const struct task_struct *task);
++__u32 to_gr_audit(const __u32 reqmode);
++int gr_set_acls(const int type);
++int gr_acl_is_enabled(void);
++char gr_roletype_to_char(void);
++
++void gr_handle_alertkill(struct task_struct *task);
++char *gr_to_filename(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++char *gr_to_filename1(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++char *gr_to_filename2(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++char *gr_to_filename3(const struct dentry *dentry,
++                          const struct vfsmount *mnt);
++
++extern int grsec_enable_ptrace_readexec;
++extern int grsec_enable_harden_ptrace;
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_shm;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_audit_ptrace;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_rofs;
++extern int grsec_deny_new_usb;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_rename;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_symlinkown;
++extern kgid_t grsec_symlinkown_gid;
++extern int grsec_enable_tpe;
++extern kgid_t grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_tpe_invert;
++extern int grsec_enable_socket_all;
++extern kgid_t grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern kgid_t grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern kgid_t grsec_socket_server_gid;
++extern kgid_t grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_log_rwxmaps;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_resource_logging;
++extern int grsec_enable_blackhole;
++extern int grsec_lastack_retries;
++extern int grsec_enable_brute;
++extern int grsec_enable_harden_ipc;
++extern int grsec_enable_harden_tty;
++extern int grsec_lock;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++extern rwlock_t grsec_exec_file_lock;
++
++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
++                      gr_to_filename2((tsk)->exec_file->f_path.dentry, \
++                      (tsk)->exec_file->f_path.mnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
++                      gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
++                      (tsk)->real_parent->exec_file->f_path.mnt) : "/")
++
++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
++                      gr_to_filename((tsk)->exec_file->f_path.dentry, \
++                      (tsk)->exec_file->f_path.mnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
++                      gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
++                      (tsk)->real_parent->exec_file->f_path.mnt) : "/")
++
++#define proc_is_chrooted(tsk_a)  ((tsk_a)->gr_is_chrooted)
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
++
++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
++{
++      if (file1 && file2) {
++              const struct inode *inode1 = file1->f_path.dentry->d_inode;
++              const struct inode *inode2 = file2->f_path.dentry->d_inode;
++              if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
++                      return true;
++      }
++
++      return false;
++}
++
++#define GR_CHROOT_CAPS {{ \
++      CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++      CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++      CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++      CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++      CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++      CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
++      CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
++
++#define security_learn(normal_msg,args...) \
++({ \
++      read_lock(&grsec_exec_file_lock); \
++      gr_add_learn_entry(normal_msg "\n", ## args); \
++      read_unlock(&grsec_exec_file_lock); \
++})
++
++enum {
++      GR_DO_AUDIT,
++      GR_DONT_AUDIT,
++      /* used for non-audit messages that we shouldn't kill the task on */
++      GR_DONT_AUDIT_GOOD
++};
++
++enum {
++      GR_TTYSNIFF,
++      GR_RBAC,
++      GR_RBAC_STR,
++      GR_STR_RBAC,
++      GR_RBAC_MODE2,
++      GR_RBAC_MODE3,
++      GR_FILENAME,
++      GR_SYSCTL_HIDDEN,
++      GR_NOARGS,
++      GR_ONE_INT,
++      GR_ONE_INT_TWO_STR,
++      GR_ONE_STR,
++      GR_STR_INT,
++      GR_TWO_STR_INT,
++      GR_TWO_INT,
++      GR_TWO_U64,
++      GR_THREE_INT,
++      GR_FIVE_INT_TWO_STR,
++      GR_TWO_STR,
++      GR_THREE_STR,
++      GR_FOUR_STR,
++      GR_STR_FILENAME,
++      GR_FILENAME_STR,
++      GR_FILENAME_TWO_INT,
++      GR_FILENAME_TWO_INT_STR,
++      GR_TEXTREL,
++      GR_PTRACE,
++      GR_RESOURCE,
++      GR_CAP,
++      GR_SIG,
++      GR_SIG2,
++      GR_CRASH1,
++      GR_CRASH2,
++      GR_PSACCT,
++      GR_RWXMAP,
++      GR_RWXMAPVMA
++};
++
++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
++#define gr_log_textrel_ulong_ulong(audit, msg, str, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, str, file, ulong1, ulong2)
++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
++#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
++
++#endif
++
++#endif
+diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
+new file mode 100644
+index 0000000..94ac4d2
+--- /dev/null
++++ b/include/linux/grmsg.h
+@@ -0,0 +1,120 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
++#define GR_STOPMOD_MSG "denied modification of module state by "
++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
++#define GR_IOPERM_MSG "denied use of ioperm() by "
++#define GR_IOPL_MSG "denied use of iopl() by "
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
++#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
++#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
++#define GR_CHROOT_PATHAT_MSG "denied relative path access outside of chroot to %.950s by "
++#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
++#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
++#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
++#define GR_INITF_ACL_MSG "init_variables() failed %s by "
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
++#define GR_SHUTS_ACL_MSG "shutdown auth success for "
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
++#define GR_ENABLEF_ACL_MSG "unable to load %s for "
++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
++#define GR_RELOADF_ACL_MSG "failed reload of %s for "
++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
++#define GR_SPROLEF_ACL_MSG "special role %s failure for "
++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
++#define GR_INVMODE_ACL_MSG "invalid mode %d by "
++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
++#define GR_FAILFORK_MSG "failed fork with errno %s by "
++#define GR_NICE_CHROOT_MSG "denied priority change by "
++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
++#define GR_TIME_MSG "time set by "
++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
++#define GR_BIND_MSG "denied bind() by "
++#define GR_CONNECT_MSG "denied connect() by "
++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
++#define GR_CAP_ACL_MSG "use of %s denied for "
++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
++#define GR_CAP_ACL_MSG2 "use of %s permitted for "
++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
++#define GR_TEXTREL_AUDIT_MSG "allowed %s text relocation transition in %.950s, VMA:0x%08lx 0x%08lx by "
++#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
++#define GR_VM86_MSG "denied use of vm86 by "
++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
++#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
++#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
++#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds.  Please investigate the crash report for "
++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes.  Please investigate the crash report for "
++#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
++#define GR_TIOCSTI_MSG "denied unprivileged use of TIOCSTI by "
++#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
+diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
+new file mode 100644
+index 0000000..749b915
+--- /dev/null
++++ b/include/linux/grsecurity.h
+@@ -0,0 +1,259 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/binfmts.h>
++#include <linux/tty.h>
++#include <linux/gracl.h>
++
++/* notify of brain-dead configs */
++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_GRKERNSEC_KMEM)
++#error "CONFIG_DEBUG_FS being enabled is a security risk when CONFIG_GRKERNSEC_KMEM is enabled"
++#endif
++#if defined(CONFIG_PROC_PAGE_MONITOR) && defined(CONFIG_GRKERNSEC)
++#error "CONFIG_PROC_PAGE_MONITOR is a security risk"
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
++#endif
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
++#endif
++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
++#error "CONFIG_PAX enabled, but no PaX options are enabled."
++#endif
++
++int gr_handle_new_usb(void);
++
++void gr_handle_brute_attach(int dumpable);
++void gr_handle_brute_check(void);
++void gr_handle_kernel_exploit(void);
++
++char gr_roletype_to_char(void);
++
++int gr_proc_is_restricted(void);
++
++int gr_acl_enable_at_secure(void);
++
++int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
++int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
++
++int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap, bool log);
++
++void gr_del_task_from_ip_table(struct task_struct *p);
++
++int gr_pid_is_chrooted(struct task_struct *p);
++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
++int gr_handle_chroot_nice(void);
++int gr_handle_chroot_sysctl(const int op);
++int gr_handle_chroot_setpriority(struct task_struct *p,
++                                      const int niceval);
++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++int gr_chroot_pathat(int dfd, struct dentry *u_dentry, struct vfsmount *u_mnt, unsigned flags);
++int gr_chroot_fhandle(void);
++int gr_handle_chroot_chroot(const struct dentry *dentry,
++                                 const struct vfsmount *mnt);
++void gr_handle_chroot_chdir(const struct path *path);
++int gr_handle_chroot_chmod(const struct dentry *dentry,
++                                const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mknod(const struct dentry *dentry,
++                                const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mount(const struct dentry *dentry,
++                                const struct vfsmount *mnt,
++                                const char *dev_name);
++int gr_handle_chroot_pivot(void);
++int gr_handle_chroot_unix(const pid_t pid);
++
++int gr_handle_rawio(const struct inode *inode);
++
++void gr_handle_ioperm(void);
++void gr_handle_iopl(void);
++void gr_handle_msr_write(void);
++
++umode_t gr_acl_umask(void);
++
++int gr_tpe_allow(const struct file *file);
++
++void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
++void gr_clear_chroot_entries(struct task_struct *task);
++
++void gr_log_forkfail(const int retval);
++void gr_log_timechange(void);
++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
++void gr_log_chdir(const struct dentry *dentry,
++                       const struct vfsmount *mnt);
++void gr_log_chroot_exec(const struct dentry *dentry,
++                             const struct vfsmount *mnt);
++void gr_log_remount(const char *devname, const int retval);
++void gr_log_unmount(const char *devname, const int retval);
++void gr_log_mount(const char *from, struct path *to, const int retval);
++void gr_log_textrel(struct vm_area_struct *vma, bool is_textrel_rw);
++void gr_log_ptgnustack(struct file *file);
++void gr_log_rwxmmap(struct file *file);
++void gr_log_rwxmprotect(struct vm_area_struct *vma);
++
++int gr_handle_follow_link(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++int gr_handle_fifo(const struct dentry *dentry,
++                        const struct vfsmount *mnt,
++                        const struct dentry *dir, const int flag,
++                        const int acc_mode);
++int gr_handle_hardlink(const struct dentry *dentry,
++                            const struct vfsmount *mnt,
++                            const struct filename *to);
++
++int gr_is_capable(const int cap);
++int gr_is_capable_nolog(const int cap);
++int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
++int gr_task_is_capable_nolog(const struct task_struct *task, const struct cred *cred, const int cap);
++
++void gr_copy_label(struct task_struct *tsk);
++void gr_handle_crash(struct task_struct *task, const int sig);
++int gr_handle_signal(const struct task_struct *p, const int sig);
++int gr_check_crash_uid(const kuid_t uid);
++int gr_check_protected_task(const struct task_struct *task);
++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
++int gr_acl_handle_mmap(const struct file *file,
++                            const unsigned long prot);
++int gr_acl_handle_mprotect(const struct file *file,
++                                const unsigned long prot);
++int gr_check_hidden_task(const struct task_struct *tsk);
++__u32 gr_acl_handle_truncate(const struct dentry *dentry,
++                                  const struct vfsmount *mnt);
++__u32 gr_acl_handle_utime(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++__u32 gr_acl_handle_access(const struct dentry *dentry,
++                                const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_chmod(const struct dentry *dentry,
++                               const struct vfsmount *mnt, umode_t *mode);
++__u32 gr_acl_handle_chown(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++int gr_handle_ptrace(struct task_struct *task, const long request);
++int gr_handle_proc_ptrace(struct task_struct *task);
++__u32 gr_acl_handle_execve(const struct dentry *dentry,
++                                const struct vfsmount *mnt);
++int gr_check_crash_exec(const struct file *filp);
++int gr_acl_is_enabled(void);
++void gr_set_role_label(struct task_struct *task, const kuid_t uid,
++                            const kgid_t gid);
++int gr_set_proc_label(const struct dentry *dentry,
++                      const struct vfsmount *mnt,
++                      const int unsafe_flags);
++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++                              const struct vfsmount *mnt);
++__u32 gr_acl_handle_open(const struct dentry *dentry,
++                              const struct vfsmount *mnt, int acc_mode);
++__u32 gr_acl_handle_creat(const struct dentry *dentry,
++                               const struct dentry *p_dentry,
++                               const struct vfsmount *p_mnt,
++                               int open_flags, int acc_mode, const int imode);
++void gr_handle_create(const struct dentry *dentry,
++                           const struct vfsmount *mnt);
++void gr_handle_proc_create(const struct dentry *dentry,
++                         const struct inode *inode);
++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++                               const struct dentry *parent_dentry,
++                               const struct vfsmount *parent_mnt,
++                               const int mode);
++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++                               const struct dentry *parent_dentry,
++                               const struct vfsmount *parent_mnt);
++__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++                               const struct vfsmount *mnt);
++void gr_handle_delete(const u64 ino, const dev_t dev);
++__u32 gr_acl_handle_unlink(const struct dentry *dentry,
++                                const struct vfsmount *mnt);
++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++                                 const struct dentry *parent_dentry,
++                                 const struct vfsmount *parent_mnt,
++                                 const struct filename *from);
++__u32 gr_acl_handle_link(const struct dentry *new_dentry,
++                              const struct dentry *parent_dentry,
++                              const struct vfsmount *parent_mnt,
++                              const struct dentry *old_dentry,
++                              const struct vfsmount *old_mnt, const struct filename *to);
++int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
++int gr_acl_handle_rename(struct dentry *new_dentry,
++                              struct dentry *parent_dentry,
++                              const struct vfsmount *parent_mnt,
++                              struct dentry *old_dentry,
++                              struct inode *old_parent_inode,
++                              struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++                              struct dentry *old_dentry,
++                              struct dentry *new_dentry,
++                              struct vfsmount *mnt, const __u8 replace, unsigned int flags);
++__u32 gr_check_link(const struct dentry *new_dentry,
++                         const struct dentry *parent_dentry,
++                         const struct vfsmount *parent_mnt,
++                         const struct dentry *old_dentry,
++                         const struct vfsmount *old_mnt);
++int gr_acl_handle_filldir(const struct file *file, const char *name,
++                               const unsigned int namelen, const u64 ino);
++
++__u32 gr_acl_handle_unix(const struct dentry *dentry,
++                              const struct vfsmount *mnt);
++void gr_acl_handle_exit(void);
++void gr_acl_handle_psacct(struct task_struct *task, const long code);
++int gr_acl_handle_procpidmem(const struct task_struct *task);
++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
++void gr_audit_ptrace(struct task_struct *task);
++dev_t gr_get_dev_from_dentry(struct dentry *dentry);
++u64 gr_get_ino_from_dentry(struct dentry *dentry);
++void gr_put_exec_file(struct task_struct *task);
++
++int gr_get_symlinkown_enabled(void);
++
++int gr_ptrace_readexec(struct file *file, int unsafe_flags);
++
++int gr_handle_tiocsti(struct tty_struct *tty);
++
++void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
++void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
++int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
++                       struct dentry *newdentry, struct vfsmount *newmnt);
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task, const int res,
++                                 const unsigned long wanted, const int gt);
++#else
++static inline void gr_log_resource(const struct task_struct *task, const int res,
++                                 const unsigned long wanted, const int gt)
++{
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
++void gr_handle_vm86(void);
++void gr_handle_mem_readwrite(u64 from, u64 to);
++
++void gr_log_badprocpid(const char *entry);
++
++extern int grsec_enable_dmesg;
++extern int grsec_disable_privio;
++
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++extern kgid_t grsec_proc_gid;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++extern int grsec_enable_chroot_findtask;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern int grsec_enable_setxid;
++#endif
++#endif
++
++#endif
+diff --git a/include/linux/grsock.h b/include/linux/grsock.h
+new file mode 100644
+index 0000000..e7ffaaf
+--- /dev/null
++++ b/include/linux/grsock.h
+@@ -0,0 +1,19 @@
++#ifndef __GRSOCK_H
++#define __GRSOCK_H
++
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++                            const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_server_other(const struct sock *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(struct socket * sock,
++                           struct sockaddr_in * addr);
++extern int gr_search_bind(struct socket * sock,
++                        struct sockaddr_in * addr);
++extern int gr_search_listen(struct socket * sock);
++extern int gr_search_accept(struct socket * sock);
++extern int gr_search_socket(const int domain, const int type,
++                          const int protocol);
++
++#endif
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index bb3f329..9daed55 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -190,6 +190,18 @@ static inline void clear_highpage(struct page *page)
+       kunmap_atomic(kaddr);
+ }
++static inline void sanitize_highpage(struct page *page)
++{
++      void *kaddr;
++      unsigned long flags;
++
++      local_irq_save(flags);
++      kaddr = kmap_atomic(page);
++      clear_page(kaddr);
++      kunmap_atomic(kaddr);
++      local_irq_restore(flags);
++}
++
+ static inline void zero_user_segments(struct page *page,
+       unsigned start1, unsigned end1,
+       unsigned start2, unsigned end2)
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index fe99e6f..b2e62ec 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -314,7 +314,7 @@ struct hstate {
+       unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+ #ifdef CONFIG_CGROUP_HUGETLB
+       /* cgroup control files */
+-      struct cftype cgroup_files[5];
++      struct cftype (*cgroup_files)[5];
+ #endif
+       char name[HSTATE_NAME_LEN];
+ };
+diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
+index 063962f..d34f2da 100644
+--- a/include/linux/hugetlb_cgroup.h
++++ b/include/linux/hugetlb_cgroup.h
+@@ -26,6 +26,13 @@ struct hugetlb_cgroup;
+ #ifdef CONFIG_CGROUP_HUGETLB
++enum {
++      RES_USAGE,
++      RES_LIMIT,
++      RES_MAX_USAGE,
++      RES_FAILCNT,
++};
++
+ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+ {
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
+@@ -64,6 +71,10 @@ extern void hugetlb_cgroup_file_init(void) __init;
+ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+                                  struct page *newhpage);
++ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off);
++ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off);
++u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css, struct cftype *cft);
++
+ #else
+ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+ {
+diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
+index 1c7b89a..7dda400 100644
+--- a/include/linux/hwmon-sysfs.h
++++ b/include/linux/hwmon-sysfs.h
+@@ -25,7 +25,8 @@
+ struct sensor_device_attribute{
+       struct device_attribute dev_attr;
+       int index;
+-};
++} __do_const;
++typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
+ #define to_sensor_dev_attr(_dev_attr) \
+       container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
+@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
+       struct device_attribute dev_attr;
+       u8 index;
+       u8 nr;
+-};
++} __do_const;
++typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
+ #define to_sensor_dev_attr_2(_dev_attr) \
+       container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index fffdc27..122364f 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -425,6 +425,7 @@ struct i2c_algorithm {
+       int (*unreg_slave)(struct i2c_client *client);
+ #endif
+ };
++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
+ /**
+  * struct i2c_timings - I2C timing information
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index ba7a9b0..33a0237 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -78,7 +78,7 @@ struct pppox_proto {
+       int             (*ioctl)(struct socket *sock, unsigned int cmd,
+                                unsigned long arg);
+       struct module   *owner;
+-};
++} __do_const;
+ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
+ extern void unregister_pppox_proto(int proto_num);
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 6935d02..5e3f46e 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -39,7 +39,7 @@
+ /* These are for everybody (although not all archs will actually
+    discard it in modules) */
+-#define __init                __section(.init.text) __cold notrace
++#define __init                __section(.init.text) __cold notrace __latent_entropy
+ #define __initdata    __section(.init.data)
+ #define __initconst   __constsection(.init.rodata)
+ #define __exitdata    __section(.exit.data)
+@@ -86,7 +86,7 @@
+ #define __exit          __section(.exit.text) __exitused __cold notrace
+ /* Used for MEMORY_HOTPLUG */
+-#define __meminit        __section(.meminit.text) __cold notrace
++#define __meminit        __section(.meminit.text) __cold notrace __latent_entropy
+ #define __meminitdata    __section(.meminit.data)
+ #define __meminitconst   __constsection(.meminit.rodata)
+ #define __memexit        __section(.memexit.text) __exitused __cold notrace
+@@ -111,6 +111,12 @@
+ #define __REFDATA        .section       ".ref.data", "aw"
+ #define __REFCONST       .section       ".ref.rodata", "a"
++#ifdef CONFIG_PAX_KERNEXEC
++#define __READ_ONLY   .section        ".data..read_only","a",%progbits
++#else
++#define __READ_ONLY   .section        ".data..mostly","aw",%progbits
++#endif
++
+ #ifndef __ASSEMBLY__
+ /*
+  * Used for initialization calls..
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index f8834f8..eb807a2 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -159,6 +159,12 @@ extern struct task_group root_task_group;
+ #define INIT_TASK_COMM "swapper"
++#ifdef CONFIG_X86
++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
++#else
++#define INIT_TASK_THREAD_INFO
++#endif
++
+ #ifdef CONFIG_RT_MUTEXES
+ # define INIT_RT_MUTEXES(tsk)                                         \
+       .pi_waiters = RB_ROOT,                                          \
+@@ -225,6 +231,7 @@ extern struct task_group root_task_group;
+       RCU_POINTER_INITIALIZER(cred, &init_cred),                      \
+       .comm           = INIT_TASK_COMM,                               \
+       .thread         = INIT_THREAD,                                  \
++      INIT_TASK_THREAD_INFO                                           \
+       .fs             = &init_fs,                                     \
+       .files          = &init_files,                                  \
+       .signal         = &init_signals,                                \
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index b6683f0..9c8f391 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -454,8 +454,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
+ struct softirq_action
+ {
+-      void    (*action)(struct softirq_action *);
+-};
++      void    (*action)(void);
++} __no_const;
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+@@ -469,7 +469,7 @@ static inline void do_softirq_own_stack(void)
+ }
+ #endif
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index a35fb8b..bceb84f 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -202,7 +202,7 @@ struct iommu_ops {
+       int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+       unsigned long pgsize_bitmap;
+-};
++} __do_const;
+ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE         1 /* Device added */
+ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE         2 /* Pre Device removed */
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 6230064..1ccafa4 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -190,7 +190,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+                   resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
+-static inline resource_size_t resource_size(const struct resource *res)
++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
+ {
+       return res->end - res->start + 1;
+ }
+diff --git a/include/linux/ipc.h b/include/linux/ipc.h
+index 9d84942..12d5bdf 100644
+--- a/include/linux/ipc.h
++++ b/include/linux/ipc.h
+@@ -19,8 +19,8 @@ struct kern_ipc_perm
+       kuid_t          cuid;
+       kgid_t          cgid;
+       umode_t         mode; 
+-      unsigned long   seq;
++      unsigned long   seq __intentional_overflow(-1);
+       void            *security;
+-};
++} __randomize_layout;
+ #endif /* _LINUX_IPC_H */
+diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
+index d10e54f..c68f8af 100644
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -60,7 +60,7 @@ struct ipc_namespace {
+       struct user_namespace *user_ns;
+       struct ns_common ns;
+-};
++} __randomize_layout;
+ extern struct ipc_namespace init_ipc_ns;
+ extern spinlock_t mq_lock;
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 0ac26c8..3bb92a3 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -408,7 +408,10 @@ struct irq_chip {
+       void            (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+       unsigned long   flags;
+-};
++} __do_const;
++#ifndef _LINUX_IRQDOMAIN_H
++typedef struct irq_chip __no_const irq_chip_no_const;
++#endif
+ /*
+  * irq_chip specific flags
+diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
+index c78a892..124e0b7 100644
+--- a/include/linux/irqchip/mmp.h
++++ b/include/linux/irqchip/mmp.h
+@@ -1,6 +1,6 @@
+ #ifndef       __IRQCHIP_MMP_H
+ #define       __IRQCHIP_MMP_H
+-extern struct irq_chip icu_irq_chip;
++extern irq_chip_no_const icu_irq_chip;
+ #endif        /* __IRQCHIP_MMP_H */
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index b51beeb..72974cf 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -62,7 +62,7 @@ struct irq_desc {
+       unsigned int            irq_count;      /* For detecting broken IRQs */
+       unsigned long           last_unhandled; /* Aging timer for unhandled count */
+       unsigned int            irqs_unhandled;
+-      atomic_t                threads_handled;
++      atomic_unchecked_t      threads_handled;
+       int                     threads_handled_last;
+       raw_spinlock_t          lock;
+       struct cpumask          *percpu_enabled;
+diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
+index ffb8460..1ef1031 100644
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -38,6 +38,9 @@ struct device_node;
+ struct irq_domain;
+ struct of_device_id;
+ struct irq_chip;
++#ifndef _LINUX_IRQ_H
++typedef struct irq_chip __no_const irq_chip_no_const;
++#endif
+ struct irq_data;
+ struct cpumask;
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index dfaa1f4..a66f30d 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -676,7 +676,7 @@ struct transaction_s
+       /*
+        * How many handles used this transaction? [t_handle_lock]
+        */
+-      atomic_t                t_handle_count;
++      atomic_unchecked_t      t_handle_count;
+       /*
+        * This transaction is being forced and some process is
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index 5fdc553..766e169 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -284,19 +284,19 @@ extern unsigned long preset_lpj;
+ extern unsigned int jiffies_to_msecs(const unsigned long j);
+ extern unsigned int jiffies_to_usecs(const unsigned long j);
+-static inline u64 jiffies_to_nsecs(const unsigned long j)
++static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
+ {
+       return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
+ }
+-extern unsigned long __msecs_to_jiffies(const unsigned int m);
++extern unsigned long __msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
+ #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ /*
+  * HZ is equal to or smaller than 1000, and 1000 is a nice round
+  * multiple of HZ, divide with the factor between them, but round
+  * upwards:
+  */
+-static inline unsigned long _msecs_to_jiffies(const unsigned int m)
++static inline unsigned long __intentional_overflow(-1) _msecs_to_jiffies(const unsigned int m)
+ {
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+ }
+@@ -307,7 +307,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+  *
+  * But first make sure the multiplication result cannot overflow:
+  */
+-static inline unsigned long _msecs_to_jiffies(const unsigned int m)
++static inline unsigned long __intentional_overflow(-1) _msecs_to_jiffies(const unsigned int m)
+ {
+       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+@@ -318,7 +318,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+  * Generic case - multiply, round and divide. But first check that if
+  * we are doing a net multiplication, that we wouldn't overflow:
+  */
+-static inline unsigned long _msecs_to_jiffies(const unsigned int m)
++static inline unsigned long __intentional_overflow(-1) _msecs_to_jiffies(const unsigned int m)
+ {
+       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+@@ -362,14 +362,14 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
+       }
+ }
+-extern unsigned long __usecs_to_jiffies(const unsigned int u);
++extern unsigned long __usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
+ #if !(USEC_PER_SEC % HZ)
+-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
++static inline unsigned long __intentional_overflow(-1) _usecs_to_jiffies(const unsigned int u)
+ {
+       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+ }
+ #else
+-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
++static inline unsigned long __intentional_overflow(-1) _usecs_to_jiffies(const unsigned int u)
+ {
+       return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+               >> USEC_TO_HZ_SHR32;
+diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
+index 6883e19..d2c7746 100644
+--- a/include/linux/kallsyms.h
++++ b/include/linux/kallsyms.h
+@@ -15,7 +15,8 @@
+ struct module;
+-#ifdef CONFIG_KALLSYMS
++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /* Lookup the address for a symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name);
+@@ -40,7 +41,7 @@ extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
+ extern int sprint_backtrace(char *buffer, unsigned long address);
+ /* Look up a kernel symbol and print it to the kernel messages. */
+-extern void __print_symbol(const char *fmt, unsigned long address);
++extern __printf(1, 3) void __print_symbol(const char *fmt, unsigned long address, ...);
+ int lookup_symbol_name(unsigned long addr, char *symname);
+ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+@@ -104,21 +105,26 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
+ }
+ /* Stupid that this does nothing, but I didn't create this mess. */
+-#define __print_symbol(fmt, addr)
++#define __print_symbol(fmt, addr, args...)
+ #endif /*CONFIG_KALLSYMS*/
++#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
++      arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
++extern unsigned long kallsyms_lookup_name(const char *name);
++extern __printf(1, 3) void __print_symbol(const char *fmt, unsigned long address, ...);
++extern int sprint_backtrace(char *buffer, unsigned long address);
++extern int sprint_symbol(char *buffer, unsigned long address);
++extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
++const char *kallsyms_lookup(unsigned long addr,
++                          unsigned long *symbolsize,
++                          unsigned long *offset,
++                          char **modname, char *namebuf);
++extern int kallsyms_lookup_size_offset(unsigned long addr,
++                                unsigned long *symbolsize,
++                                unsigned long *offset);
++#endif
+-/* This macro allows us to keep printk typechecking */
+-static __printf(1, 2)
+-void __check_printsym_format(const char *fmt, ...)
+-{
+-}
+-
+-static inline void print_symbol(const char *fmt, unsigned long addr)
+-{
+-      __check_printsym_format(fmt, "");
+-      __print_symbol(fmt, (unsigned long)
+-                     __builtin_extract_return_addr((void *)addr));
+-}
++#define print_symbol(fmt, addr) \
++      __print_symbol(fmt, addr, "")
+ static inline void print_ip_sym(unsigned long ip)
+ {
+diff --git a/include/linux/key-type.h b/include/linux/key-type.h
+index eaee981..d1d24c3 100644
+--- a/include/linux/key-type.h
++++ b/include/linux/key-type.h
+@@ -45,7 +45,7 @@ struct key_preparsed_payload {
+       size_t          datalen;        /* Raw datalen */
+       size_t          quotalen;       /* Quota length for proposed payload */
+       time_t          expiry;         /* Expiry time of key */
+-};
++} __randomize_layout;
+ typedef int (*request_key_actor_t)(struct key_construction *key,
+                                  const char *op, void *aux);
+@@ -150,7 +150,7 @@ struct key_type {
+       /* internal fields */
+       struct list_head        link;           /* link in types list */
+       struct lock_class_key   lock_class;     /* key->sem lock class */
+-};
++} __do_const __randomize_layout;
+ extern struct key_type key_type_keyring;
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index e465bb1..19f605fd 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -52,7 +52,7 @@ extern int kgdb_connected;
+ extern int kgdb_io_module_registered;
+ extern atomic_t                       kgdb_setting_breakpoint;
+-extern atomic_t                       kgdb_cpu_doing_single_step;
++extern atomic_unchecked_t     kgdb_cpu_doing_single_step;
+ extern struct task_struct     *kgdb_usethread;
+ extern struct task_struct     *kgdb_contthread;
+@@ -254,7 +254,7 @@ struct kgdb_arch {
+       void    (*correct_hw_break)(void);
+       void    (*enable_nmi)(bool on);
+-};
++} __do_const;
+ /**
+  * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
+@@ -279,7 +279,7 @@ struct kgdb_io {
+       void                    (*pre_exception) (void);
+       void                    (*post_exception) (void);
+       int                     is_console;
+-};
++} __do_const;
+ extern struct kgdb_arch               arch_kgdb_ops;
+diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
+index 4894c68..7824e6a 100644
+--- a/include/linux/kmemleak.h
++++ b/include/linux/kmemleak.h
+@@ -27,7 +27,7 @@
+ extern void kmemleak_init(void) __init;
+ extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+-                         gfp_t gfp) __ref;
++                         gfp_t gfp) __ref __size_overflow(2);
+ extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+                                 gfp_t gfp) __ref;
+ extern void kmemleak_free(const void *ptr) __ref;
+@@ -63,7 +63,7 @@ static inline void kmemleak_erase(void **ptr)
+ static inline void kmemleak_init(void)
+ {
+ }
+-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
++static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
+                                 gfp_t gfp)
+ {
+ }
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index fcfd2bf..e4f5edb 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
+  * usually useless though. */
+ extern __printf(2, 3)
+ int __request_module(bool wait, const char *name, ...);
++extern __printf(3, 4)
++int ___request_module(bool wait, char *param_name, const char *name, ...);
+ #define request_module(mod...) __request_module(true, mod)
+ #define request_module_nowait(mod...) __request_module(false, mod)
+ #define try_then_request_module(x, mod...) \
+@@ -57,6 +59,9 @@ struct subprocess_info {
+       struct work_struct work;
+       struct completion *complete;
+       char *path;
++#ifdef CONFIG_GRKERNSEC
++      char *origpath;
++#endif
+       char **argv;
+       char **envp;
+       int wait;
+@@ -64,7 +69,7 @@ struct subprocess_info {
+       int (*init)(struct subprocess_info *info, struct cred *new);
+       void (*cleanup)(struct subprocess_info *info);
+       void *data;
+-};
++} __randomize_layout;
+ extern int
+ call_usermodehelper(char *path, char **argv, char **envp, int wait);
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index e628459..9d45d56 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -119,7 +119,7 @@ struct kobj_type {
+       struct attribute **default_attrs;
+       const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
+       const void *(*namespace)(struct kobject *kobj);
+-};
++} __do_const;
+ struct kobj_uevent_env {
+       char *argv[3];
+@@ -143,6 +143,14 @@ struct kobj_attribute {
+       ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+                        const char *buf, size_t count);
+ };
++typedef struct kobj_attribute __no_const kobj_attribute_no_const;
++
++#define KOBJECT_ATTR(_name, _mode, _show, _store) \
++      struct kobj_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
++#define KOBJECT_ATTR_RW(_name) \
++      struct kobj_attribute dev_attr_##_name = __ATTR_RW(_name)
++#define KOBJECT_ATTR_RO(_name) \
++      struct kobj_attribute dev_attr_##_name = __ATTR_RO(_name)
+ extern const struct sysfs_ops kobj_sysfs_ops;
+@@ -170,7 +178,7 @@ struct kset {
+       spinlock_t list_lock;
+       struct kobject kobj;
+       const struct kset_uevent_ops *uevent_ops;
+-};
++} __randomize_layout;
+ extern void kset_init(struct kset *kset);
+ extern int __must_check kset_register(struct kset *kset);
+diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
+index df32d25..fb52e27 100644
+--- a/include/linux/kobject_ns.h
++++ b/include/linux/kobject_ns.h
+@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
+       const void *(*netlink_ns)(struct sock *sk);
+       const void *(*initial_ns)(void);
+       void (*drop_ns)(void *);
+-};
++} __do_const;
+ int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+ int kobj_ns_type_registered(enum kobj_ns_type type);
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index e15828f..531fd0a 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -67,7 +67,7 @@ static inline void kref_get(struct kref *kref)
+ static inline int kref_sub(struct kref *kref, unsigned int count,
+            void (*release)(struct kref *kref))
+ {
+-      WARN_ON(release == NULL);
++      BUG_ON(release == NULL);
+       if (atomic_sub_and_test((int) count, &kref->refcount)) {
+               release(kref);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index e37d4f9..0a24569 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -997,7 +997,7 @@ struct ata_port_operations {
+        * fields must be pointers.
+        */
+       const struct ata_port_operations        *inherits;
+-};
++} __do_const;
+ struct ata_port_info {
+       unsigned long           flags;
+diff --git a/include/linux/linkage.h b/include/linux/linkage.h
+index a6a42dd..9787403 100644
+--- a/include/linux/linkage.h
++++ b/include/linux/linkage.h
+@@ -5,6 +5,7 @@
+ #include <linux/stringify.h>
+ #include <linux/export.h>
+ #include <asm/linkage.h>
++#include <asm/bitsperlong.h>
+ /* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+ #ifndef ASM_NL
+@@ -36,6 +37,7 @@
+ #endif
+ #define __page_aligned_data   __section(.data..page_aligned) __aligned(PAGE_SIZE)
++#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
+ #define __page_aligned_bss    __section(.bss..page_aligned) __aligned(PAGE_SIZE)
+ /*
+@@ -79,17 +81,40 @@
+ #define ALIGN_STR __ALIGN_STR
+ #ifndef ENTRY
+-#define ENTRY(name) \
++#define __ENTRY(name, rap_hash) \
+       .globl name ASM_NL \
+       ALIGN ASM_NL \
++      rap_hash \
+       name:
++
++#define ENTRY(name) __ENTRY(name,)
++
+ #endif
++
+ #endif /* LINKER_SCRIPT */
+ #ifndef WEAK
+-#define WEAK(name)       \
+-      .weak name ASM_NL   \
++#define __WEAK(name, rap_hash) \
++      .weak name ASM_NL \
++      rap_hash \
+       name:
++
++#define WEAK(name) __WEAK(name, )
++#endif
++
++#ifdef CONFIG_PAX_RAP
++#if BITS_PER_LONG == 64
++#define __ASM_RAP_HASH(hash) .quad 0, hash ASM_NL
++#elif BITS_PER_LONG == 32
++#define __ASM_RAP_HASH(hash) .long 0, 0, 0, hash ASM_NL
++#else
++#error incompatible BITS_PER_LONG
++#endif
++#define RAP_ENTRY(name) __ENTRY(name, __ASM_RAP_HASH(__rap_hash_##name))
++#define RAP_WEAK(name) __WEAK(name, __ASM_RAP_HASH(__rap_hash_##name))
++#else
++#define RAP_ENTRY(name) ENTRY(name)
++#define RAP_WEAK(name) WEAK(name)
+ #endif
+ #ifndef END
+diff --git a/include/linux/list.h b/include/linux/list.h
+index 5183138..645f33d 100644
+--- a/include/linux/list.h
++++ b/include/linux/list.h
+@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
+ extern void list_del(struct list_head *entry);
+ #endif
++extern void __pax_list_add(struct list_head *new,
++                            struct list_head *prev,
++                            struct list_head *next);
++static inline void pax_list_add(struct list_head *new, struct list_head *head)
++{
++      __pax_list_add(new, head, head->next);
++}
++static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
++{
++      __pax_list_add(new, head->prev, head);
++}
++extern void pax_list_del(struct list_head *entry);
++
+ /**
+  * list_replace - replace old entry by new one
+  * @old : the element to be replaced
+@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
+       INIT_LIST_HEAD(entry);
+ }
++extern void pax_list_del_init(struct list_head *entry);
++
+ /**
+  * list_move - delete from one list and add as another's head
+  * @list: the entry to move
+diff --git a/include/linux/llist.h b/include/linux/llist.h
+index fd4ca0b..d77d4a8 100644
+--- a/include/linux/llist.h
++++ b/include/linux/llist.h
+@@ -168,6 +168,10 @@ static inline struct llist_node *llist_next(struct llist_node *node)
+ extern bool llist_add_batch(struct llist_node *new_first,
+                           struct llist_node *new_last,
+                           struct llist_head *head);
++
++extern bool pax_llist_add_batch(struct llist_node *new_first,
++                              struct llist_node *new_last,
++                              struct llist_head *head);
+ /**
+  * llist_add - add a new entry
+  * @new:      new entry to be added
+@@ -180,6 +184,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
+       return llist_add_batch(new, new, head);
+ }
++static inline bool pax_llist_add(struct llist_node *new, struct llist_head *head)
++{
++      return pax_llist_add_batch(new, new, head);
++}
++
+ /**
+  * llist_del_all - delete all entries from lock-less list
+  * @head:     the head of lock-less list to delete all entries
+diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
+index d39ed1c..8b5d98f 100644
+--- a/include/linux/lockd/xdr.h
++++ b/include/linux/lockd/xdr.h
+@@ -95,24 +95,24 @@ struct nlm_reboot {
+  */
+ #define NLMSVC_XDRSIZE                sizeof(struct nlm_args)
+-int   nlmsvc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlmsvc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlmsvc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlmsvc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlmsvc_encode_void(struct svc_rqst *, __be32 *, void *);
+-int   nlmsvc_decode_void(struct svc_rqst *, __be32 *, void *);
+-int   nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlmsvc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlmsvc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlmsvc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
++int   nlmsvc_decode_testargs(void *, __be32 *, void *);
++int   nlmsvc_encode_testres(void *, __be32 *, void *);
++int   nlmsvc_decode_lockargs(void *, __be32 *, void *);
++int   nlmsvc_decode_cancargs(void *, __be32 *, void *);
++int   nlmsvc_decode_unlockargs(void *, __be32 *, void *);
++int   nlmsvc_encode_res(void *, __be32 *, void *);
++int   nlmsvc_decode_res(void *, __be32 *, void *);
++int   nlmsvc_encode_void(void *, __be32 *p, void *);
++int   nlmsvc_decode_void(void *, __be32 *, void *);
++int   nlmsvc_decode_shareargs(void *, __be32 *, void *);
++int   nlmsvc_encode_shareres(void *, __be32 *, void *);
++int   nlmsvc_decode_notify(void *, __be32 *, void *);
++int   nlmsvc_decode_reboot(void *, __be32 *, void *);
+ /*
+-int   nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+-int   nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+-int   nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+-int   nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
++int   nlmclt_encode_testargs(void *, u32 *, void *);
++int   nlmclt_encode_lockargs(void *, u32 *, void *);
++int   nlmclt_encode_cancargs(void *, u32 *, void *);
++int   nlmclt_encode_unlockargs(void *, u32 *, void *);
+  */
+ #endif /* LOCKD_XDR_H */
+diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
+index e58c88b..759ca71 100644
+--- a/include/linux/lockd/xdr4.h
++++ b/include/linux/lockd/xdr4.h
+@@ -23,24 +23,24 @@
+-int   nlm4svc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlm4svc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlm4svc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlm4svc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlm4svc_encode_void(struct svc_rqst *, __be32 *, void *);
+-int   nlm4svc_decode_void(struct svc_rqst *, __be32 *, void *);
+-int   nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlm4svc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
+-int   nlm4svc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
+-int   nlm4svc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
++int   nlm4svc_decode_testargs(void *, __be32 *, void *);
++int   nlm4svc_encode_testres(void *, __be32 *, void *);
++int   nlm4svc_decode_lockargs(void *, __be32 *, void *);
++int   nlm4svc_decode_cancargs(void *, __be32 *, void *);
++int   nlm4svc_decode_unlockargs(void *, __be32 *, void *);
++int   nlm4svc_encode_res(void *, __be32 *, void *);
++int   nlm4svc_decode_res(void *, __be32 *, void *);
++int   nlm4svc_encode_void(void *, __be32 *, void *);
++int   nlm4svc_decode_void(void *, __be32 *, void *);
++int   nlm4svc_decode_shareargs(void *, __be32 *, void *);
++int   nlm4svc_encode_shareres(void *, __be32 *, void *);
++int   nlm4svc_decode_notify(void *, __be32 *, void *);
++int   nlm4svc_decode_reboot(void *, __be32 *, void *);
+ /*
+-int   nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+-int   nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+-int   nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+-int   nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
++int   nlmclt_encode_testargs(void *, u32 *, void *);
++int   nlmclt_encode_lockargs(void *, u32 *, void *);
++int   nlmclt_encode_cancargs(void *, u32 *, void *);
++int   nlmclt_encode_unlockargs(void *, u32 *, void *);
+  */
+ extern const struct rpc_version nlm_version4;
+diff --git a/include/linux/lockref.h b/include/linux/lockref.h
+index b10b122..d37b3de 100644
+--- a/include/linux/lockref.h
++++ b/include/linux/lockref.h
+@@ -28,7 +28,7 @@ struct lockref {
+ #endif
+               struct {
+                       spinlock_t lock;
+-                      int count;
++                      atomic_t count;
+               };
+       };
+ };
+@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
+ extern int lockref_get_not_dead(struct lockref *);
+ /* Must be called under spinlock for reliable results */
+-static inline int __lockref_is_dead(const struct lockref *l)
++static inline int __lockref_is_dead(const struct lockref *lockref)
+ {
+-      return ((int)l->count < 0);
++      return atomic_read(&lockref->count) < 0;
++}
++
++static inline int __lockref_read(const struct lockref *lockref)
++{
++      return atomic_read(&lockref->count);
++}
++
++static inline void __lockref_set(struct lockref *lockref, int count)
++{
++      atomic_set(&lockref->count, count);
++}
++
++static inline void __lockref_inc(struct lockref *lockref)
++{
++      atomic_inc(&lockref->count);
++}
++
++static inline void __lockref_dec(struct lockref *lockref)
++{
++      atomic_dec(&lockref->count);
+ }
+ #endif /* __LINUX_LOCKREF_H */
+diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
+index 101bf19..feb307e 100644
+--- a/include/linux/lsm_hooks.h
++++ b/include/linux/lsm_hooks.h
+@@ -1831,7 +1831,7 @@ struct security_hook_heads {
+       struct list_head audit_rule_match;
+       struct list_head audit_rule_free;
+ #endif /* CONFIG_AUDIT */
+-};
++} __randomize_layout;
+ /*
+  * Security module hook list structure.
+@@ -1841,7 +1841,7 @@ struct security_hook_list {
+       struct list_head                list;
+       struct list_head                *head;
+       union security_list_options     hook;
+-};
++} __randomize_layout;
+ /*
+  * Initializing a security_hook_list structure takes
+diff --git a/include/linux/math64.h b/include/linux/math64.h
+index 6e8b5b2..8e8a37d 100644
+--- a/include/linux/math64.h
++++ b/include/linux/math64.h
+@@ -15,7 +15,7 @@
+  * This is commonly provided by 32bit archs to provide an optimized 64bit
+  * divide.
+  */
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+       *remainder = dividend % divisor;
+       return dividend / divisor;
+@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+ /**
+  * div64_u64 - unsigned 64bit divide with 64bit divisor
+  */
+-static inline u64 div64_u64(u64 dividend, u64 divisor)
++static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
+ {
+       return dividend / divisor;
+ }
+@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
+ #define div64_ul(x, y)   div_u64((x), (y))
+ #ifndef div_u64_rem
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+       *remainder = do_div(dividend, divisor);
+       return dividend;
+@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+ #endif
+ #ifndef div64_u64
+-extern u64 div64_u64(u64 dividend, u64 divisor);
++extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
+ #endif
+ #ifndef div64_s64
+@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
+  * divide.
+  */
+ #ifndef div_u64
+-static inline u64 div_u64(u64 dividend, u32 divisor)
++static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
+ {
+       u32 remainder;
+       return div_u64_rem(dividend, divisor, &remainder);
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 5d8ca6e..0b2174b 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -258,7 +258,7 @@ struct mem_cgroup {
+       int last_scanned_node;
+ #if MAX_NUMNODES > 1
+       nodemask_t      scan_nodes;
+-      atomic_t        numainfo_events;
++      atomic64_t      numainfo_events;
+       atomic_t        numainfo_updating;
+ #endif
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index 093607f..9717227 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -126,7 +126,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ #define hotplug_memory_notifier(fn, pri) ({           \
+-      static __meminitdata struct notifier_block fn##_mem_nb =\
++      static __meminitconst struct notifier_block fn##_mem_nb =\
+               { .notifier_call = fn, .priority = pri };\
+       register_memory_notifier(&fn##_mem_nb);                 \
+ })
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 5e5b296..629113f 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
+ }
+ #define vma_policy(vma) ((vma)->vm_policy)
++static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
++{
++      vma->vm_policy = pol;
++}
+ static inline void mpol_get(struct mempolicy *pol)
+ {
+@@ -236,6 +240,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+ }
+ #define vma_policy(vma) NULL
++static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
++{
++}
+ static inline int
+ vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 277cd39..27ecb26 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -107,6 +107,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
+ #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+ extern unsigned long sysctl_user_reserve_kbytes;
+ extern unsigned long sysctl_admin_reserve_kbytes;
+@@ -182,6 +183,11 @@ extern unsigned int kobjsize(const void *objp);
+ #define VM_ACCOUNT    0x00100000      /* Is a VM accounted object */
+ #define VM_NORESERVE  0x00200000      /* should the VM suppress accounting */
+ #define VM_HUGETLB    0x00400000      /* Huge TLB Page VM */
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++#define VM_PAGEEXEC   0x00800000      /* vma->vm_page_prot needs special handling */
++#endif
++
+ #define VM_ARCH_1     0x01000000      /* Architecture-specific flag */
+ #define VM_ARCH_2     0x02000000
+ #define VM_DONTDUMP   0x04000000      /* Do not include in the core dump */
+@@ -364,8 +370,8 @@ struct vm_operations_struct {
+       /* called by access_process_vm when get_user_pages() fails, typically
+        * for use by special VMAs that can switch between memory and hardware
+        */
+-      int (*access)(struct vm_area_struct *vma, unsigned long addr,
+-                    void *buf, int len, int write);
++      ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
++                    void *buf, size_t len, int write);
+       /* Called by the /proc/PID/maps code to ask the vma whether it
+        * has a special name.  Returning non-NULL will also cause this
+@@ -403,6 +409,7 @@ struct vm_operations_struct {
+       struct page *(*find_special_page)(struct vm_area_struct *vma,
+                                         unsigned long addr);
+ };
++typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
+ struct mmu_gather;
+ struct inode;
+@@ -1237,8 +1244,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+               unsigned int flags, unsigned long *prot, resource_size_t *phys);
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+-                      void *buf, int len, int write);
++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++                      void *buf, size_t len, int write);
+ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+               loff_t const holebegin, loff_t const holelen)
+@@ -1278,9 +1285,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+ }
+ #endif
+-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+-              void *buf, int len, int write);
++extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
++extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++              void *buf, size_t len, int write);
+ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                     unsigned long start, unsigned long nr_pages,
+@@ -1370,39 +1377,11 @@ int clear_page_dirty_for_io(struct page *page);
+ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+-{
+-      return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+ {
+       return !vma->vm_ops;
+ }
+-static inline int stack_guard_page_start(struct vm_area_struct *vma,
+-                                           unsigned long addr)
+-{
+-      return (vma->vm_flags & VM_GROWSDOWN) &&
+-              (vma->vm_start == addr) &&
+-              !vma_growsdown(vma->vm_prev, addr);
+-}
+-
+-/* Is the vma a continuation of the stack vma below it? */
+-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+-{
+-      return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+-}
+-
+-static inline int stack_guard_page_end(struct vm_area_struct *vma,
+-                                         unsigned long addr)
+-{
+-      return (vma->vm_flags & VM_GROWSUP) &&
+-              (vma->vm_end == addr) &&
+-              !vma_growsup(vma->vm_next, addr);
+-}
+-
+ int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+ extern unsigned long move_page_tables(struct vm_area_struct *vma,
+@@ -1547,8 +1526,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+ {
+       return 0;
+ }
++
++static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
++                                              unsigned long address)
++{
++      return 0;
++}
+ #else
+ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+ #endif
+ #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+@@ -1558,6 +1544,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+       return 0;
+ }
++static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
++                                              unsigned long address)
++{
++      return 0;
++}
++
+ static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+ static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+@@ -1570,6 +1562,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+ #else
+ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
+ static inline void mm_nr_pmds_init(struct mm_struct *mm)
+ {
+@@ -1606,11 +1599,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+               NULL: pud_offset(pgd, address);
+ }
++static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++      return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
++              NULL: pud_offset(pgd, address);
++}
++
+ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ {
+       return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+               NULL: pmd_offset(pud, address);
+ }
++
++static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++      return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
++              NULL: pmd_offset(pud, address);
++}
+ #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+ #if USE_SPLIT_PTE_PTLOCKS
+@@ -1995,12 +2000,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+       bool *need_rmap_locks);
+ extern void exit_mmap(struct mm_struct *);
++#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
++extern void gr_learn_resource(const struct task_struct *task, const int res,
++                            const unsigned long wanted, const int gt);
++#else
++static inline void gr_learn_resource(const struct task_struct *task, const int res,
++                                   const unsigned long wanted, const int gt)
++{
++}
++#endif
++
+ static inline int check_data_rlimit(unsigned long rlim,
+                                   unsigned long new,
+                                   unsigned long start,
+                                   unsigned long end_data,
+                                   unsigned long start_data)
+ {
++      gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
+       if (rlim < RLIM_INFINITY) {
+               if (((new - start) + (end_data - start_data)) > rlim)
+                       return -ENOSPC;
+@@ -2036,6 +2052,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot, unsigned long flags,
+       vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
+ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
+ static inline unsigned long
+ do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -2072,10 +2089,11 @@ struct vm_unmapped_area_info {
+       unsigned long high_limit;
+       unsigned long align_mask;
+       unsigned long align_offset;
++      unsigned long threadstack_offset;
+ };
+-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
+-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
++extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
++extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
+ /*
+  * Search for an unmapped address range.
+@@ -2087,7 +2105,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
+  */
+ static inline unsigned long
+-vm_unmapped_area(struct vm_unmapped_area_info *info)
++vm_unmapped_area(const struct vm_unmapped_area_info *info)
+ {
+       if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
+               return unmapped_area_topdown(info);
+@@ -2148,6 +2166,9 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+                                            struct vm_area_struct **pprev);
++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
++
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+    NULL if none.  Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+@@ -2177,10 +2198,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+ }
+ #ifdef CONFIG_MMU
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
+ void vma_set_page_prot(struct vm_area_struct *vma);
+ #else
+-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+ {
+       return __pgprot(0);
+ }
+@@ -2366,7 +2387,7 @@ extern int get_hwpoison_page(struct page *page);
+ extern int sysctl_memory_failure_early_kill;
+ extern int sysctl_memory_failure_recovery;
+ extern void shake_page(struct page *p, int access);
+-extern atomic_long_t num_poisoned_pages;
++extern atomic_long_unchecked_t num_poisoned_pages;
+ extern int soft_offline_page(struct page *page, int flags);
+@@ -2454,5 +2475,11 @@ void __init setup_nr_node_ids(void);
+ static inline void setup_nr_node_ids(void) {}
+ #endif
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 903200f..c868416 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -358,7 +358,9 @@ struct vm_area_struct {
+       struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
+ #endif
+       struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+-};
++
++      struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
++} __randomize_layout;
+ struct core_thread {
+       struct task_struct *task;
+@@ -518,7 +520,25 @@ struct mm_struct {
+ #ifdef CONFIG_MMU
+       struct work_struct async_put_work;
+ #endif
+-};
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++      unsigned long pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++      unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++      unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++      unsigned long delta_mmap;               /* randomized offset */
++      unsigned long delta_stack;              /* randomized offset */
++#endif
++
++} __randomize_layout;
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+ {
+diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
+index 3ba327a..85cd5ce 100644
+--- a/include/linux/mmiotrace.h
++++ b/include/linux/mmiotrace.h
+@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+ /* Called from ioremap.c */
+ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+                                                       void __iomem *addr);
+-extern void mmiotrace_iounmap(volatile void __iomem *addr);
++extern void mmiotrace_iounmap(const volatile void __iomem *addr);
+ /* For anyone to insert markers. Remember trailing newline. */
+ extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
+@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
+ {
+ }
+-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
++static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+ }
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 7f2ae99..27ca9cf 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -517,7 +517,7 @@ struct zone {
+       ZONE_PADDING(_pad3_)
+       /* Zone statistics */
+-      atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
++      atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+ } ____cacheline_internodealigned_in_smp;
+ enum pgdat_flags {
+@@ -721,7 +721,7 @@ typedef struct pglist_data {
+       /* Per-node vmstats */
+       struct per_cpu_nodestat __percpu *per_cpu_nodestats;
+-      atomic_long_t           vm_stat[NR_VM_NODE_STAT_ITEMS];
++      atomic_long_unchecked_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+ } pg_data_t;
+ #define node_present_pages(nid)       (NODE_DATA(nid)->node_present_pages)
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index ed84c07..c29bce4 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -139,7 +139,7 @@ struct usb_device_id {
+ #define USB_DEVICE_ID_MATCH_INT_PROTOCOL      0x0200
+ #define USB_DEVICE_ID_MATCH_INT_NUMBER                0x0400
+-#define HID_ANY_ID                            (~0)
++#define HID_ANY_ID                            (~0U)
+ #define HID_BUS_ANY                           0xffff
+ #define HID_GROUP_ANY                         0x0000
+@@ -480,7 +480,7 @@ struct dmi_system_id {
+       const char *ident;
+       struct dmi_strmatch matches[4];
+       void *driver_data;
+-};
++} __do_const;
+ /*
+  * struct dmi_device_id appears during expansion of
+  * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 0c3207d..18808a5 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -20,9 +20,11 @@
+ #include <linux/export.h>
+ #include <linux/extable.h>    /* only as arch move module.h -> extable.h */
+ #include <linux/rbtree_latch.h>
++#include <linux/fs.h>
+ #include <linux/percpu.h>
+ #include <asm/module.h>
++#include <asm/pgtable.h>
+ /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+ #define MODULE_SIG_STRING "~Module signature appended~\n"
+@@ -46,7 +48,7 @@ struct module_kobject {
+       struct kobject *drivers_dir;
+       struct module_param_attrs *mp;
+       struct completion *kobj_completion;
+-};
++} __randomize_layout;
+ struct module_attribute {
+       struct attribute attr;
+@@ -58,12 +60,13 @@ struct module_attribute {
+       int (*test)(struct module *);
+       void (*free)(struct module *);
+ };
++typedef struct module_attribute __no_const module_attribute_no_const;
+ struct module_version_attribute {
+       struct module_attribute mattr;
+       const char *module_name;
+       const char *version;
+-} __attribute__ ((__aligned__(sizeof(void *))));
++} __do_const __attribute__ ((__aligned__(sizeof(void *))));
+ extern ssize_t __modver_version_show(struct module_attribute *,
+                                    struct module_kobject *, char *);
+@@ -290,19 +293,18 @@ struct mod_tree_node {
+ };
+ struct module_layout {
+-      /* The actual code + data. */
+-      void *base;
+-      /* Total size. */
+-      unsigned int size;
+-      /* The size of the executable code.  */
+-      unsigned int text_size;
+-      /* Size of RO section of the module (text+rodata) */
+-      unsigned int ro_size;
+-      /* Size of RO after init section */
+-      unsigned int ro_after_init_size;
++      /* The actual code. */
++      void *base_rx;
++      /* The actual data. */
++      void *base_rw;
++      /* Code size. */
++      unsigned int size_rx;
++      /* Data size. */
++      unsigned int size_rw;
+ #ifdef CONFIG_MODULES_TREE_LOOKUP
+-      struct mod_tree_node mtn;
++      struct mod_tree_node mtn_rx;
++      struct mod_tree_node mtn_rw;
+ #endif
+ };
+@@ -339,7 +341,7 @@ struct module {
+       /* Sysfs stuff. */
+       struct module_kobject mkobj;
+-      struct module_attribute *modinfo_attrs;
++      module_attribute_no_const *modinfo_attrs;
+       const char *version;
+       const char *srcversion;
+       struct kobject *holders_dir;
+@@ -447,6 +449,10 @@ struct module {
+       unsigned int num_trace_events;
+       struct trace_enum_map **trace_enums;
+       unsigned int num_trace_enums;
++      struct file_operations trace_id;
++      struct file_operations trace_enable;
++      struct file_operations trace_format;
++      struct file_operations trace_filter;
+ #endif
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+       unsigned int num_ftrace_callsites;
+@@ -478,7 +484,8 @@ struct module {
+       ctor_fn_t *ctors;
+       unsigned int num_ctors;
+ #endif
+-} ____cacheline_aligned;
++} ____cacheline_aligned __randomize_layout;
++
+ #ifndef MODULE_ARCH_INIT
+ #define MODULE_ARCH_INIT {}
+ #endif
+@@ -499,18 +506,38 @@ bool is_module_address(unsigned long addr);
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      if (ktla_ktva(addr) >= (unsigned long)start &&
++          ktla_ktva(addr) < (unsigned long)start + size)
++              return 1;
++#endif
++
++      return ((void *)addr >= start && (void *)addr < start + size);
++}
++
++static inline int within_module_rx(unsigned long addr, const struct module_layout *layout)
++{
++      return within_module_range(addr, layout->base_rx, layout->size_rx);
++}
++
++static inline int within_module_rw(unsigned long addr, const struct module_layout *layout)
++{
++      return within_module_range(addr, layout->base_rw, layout->size_rw);
++}
++
+ static inline bool within_module_core(unsigned long addr,
+                                     const struct module *mod)
+ {
+-      return (unsigned long)mod->core_layout.base <= addr &&
+-             addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
++      return within_module_rx(addr, &mod->core_layout) || within_module_rw(addr, &mod->core_layout);
+ }
+ static inline bool within_module_init(unsigned long addr,
+                                     const struct module *mod)
+ {
+-      return (unsigned long)mod->init_layout.base <= addr &&
+-             addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
++      return within_module_rx(addr, &mod->init_layout) || within_module_rw(addr, &mod->init_layout);
+ }
+ static inline bool within_module(unsigned long addr, const struct module *mod)
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 4d0cb9b..3169ac7 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
+    sections.  Returns NULL on failure. */
+ void *module_alloc(unsigned long size);
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size);
++#else
++#define module_alloc_exec(x) module_alloc(x)
++#endif
++
+ /* Free memory returned from module_alloc. */
+ void module_memfree(void *module_region);
++#ifdef CONFIG_PAX_KERNEXEC
++void module_memfree_exec(void *module_region);
++#else
++#define module_memfree_exec(x) module_memfree((x))
++#endif
++
+ /*
+  * Apply the given relocation to the (simplified) ELF.  Return -error
+  * or 0.
+@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
+                                unsigned int relsec,
+                                struct module *me)
+ {
++#ifdef CONFIG_MODULES
+       printk(KERN_ERR "module %s: REL relocation unsupported\n",
+              module_name(me));
++#endif
+       return -ENOEXEC;
+ }
+ #endif
+@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
+                                    unsigned int relsec,
+                                    struct module *me)
+ {
++#ifdef CONFIG_MODULES
+       printk(KERN_ERR "module %s: REL relocation unsupported\n",
+              module_name(me));
++#endif
+       return -ENOEXEC;
+ }
+ #endif
+diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
+index 52666d9..f10563b 100644
+--- a/include/linux/moduleparam.h
++++ b/include/linux/moduleparam.h
+@@ -54,7 +54,7 @@ struct kernel_param_ops {
+       int (*get)(char *buffer, const struct kernel_param *kp);
+       /* Optional function to free kp->arg when module unloaded. */
+       void (*free)(void *arg);
+-};
++} __do_const;
+ /*
+  * Flags available for kernel_param
+@@ -226,15 +226,15 @@ struct kparam_array
+ /* Obsolete - use module_param_cb() */
+ #define module_param_call(name, set, get, arg, perm)                  \
+-      static const struct kernel_param_ops __param_ops_##name =               \
+-              { .flags = 0, (void *)set, (void *)get };               \
++      static const struct kernel_param_ops __param_ops_##name =       \
++              { .flags = 0, set, get };                               \
+       __module_param_call(MODULE_PARAM_PREFIX,                        \
+                           name, &__param_ops_##name, arg,             \
+                           (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
+ /* We don't get oldget: it's often a new-style param_get_uint, etc. */
+ static inline int
+-__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
++__check_old_set_param(int (*oldset)(const char *, const struct kernel_param *))
+ {
+       return 0;
+ }
+@@ -289,7 +289,7 @@ static inline void kernel_param_unlock(struct module *mod)
+  * @len is usually just sizeof(string).
+  */
+ #define module_param_string(name, string, len, perm)                  \
+-      static const struct kparam_string __param_string_##name         \
++      static const struct kparam_string __param_string_##name __used  \
+               = { len, string };                                      \
+       __module_param_call(MODULE_PARAM_PREFIX, name,                  \
+                           &param_ops_string,                          \
+@@ -441,7 +441,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
+  */
+ #define module_param_array_named(name, array, type, nump, perm)               \
+       param_check_##type(name, &(array)[0]);                          \
+-      static const struct kparam_array __param_arr_##name             \
++      static const struct kparam_array __param_arr_##name __used      \
+       = { .max = ARRAY_SIZE(array), .num = nump,                      \
+           .ops = &param_ops_##type,                                   \
+           .elemsize = sizeof(array[0]), .elem = array };              \
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index 54a594d..1f7fa02 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -67,7 +67,7 @@ struct vfsmount {
+       struct dentry *mnt_root;        /* root of the mounted tree */
+       struct super_block *mnt_sb;     /* pointer to superblock */
+       int mnt_flags;
+-};
++} __randomize_layout;
+ struct file; /* forward dec */
+ struct path;
+diff --git a/include/linux/msg.h b/include/linux/msg.h
+index f3f302f..a001305 100644
+--- a/include/linux/msg.h
++++ b/include/linux/msg.h
+@@ -29,7 +29,7 @@ struct msg_queue {
+       struct list_head q_messages;
+       struct list_head q_receivers;
+       struct list_head q_senders;
+-};
++} __randomize_layout;
+ /* Helper routines for sys_msgsnd and sys_msgrcv */
+ extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
+diff --git a/include/linux/net.h b/include/linux/net.h
+index b9f0ff4..fd3f501 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -196,7 +196,7 @@ struct net_proto_family {
+       int             (*create)(struct net *net, struct socket *sock,
+                                 int protocol, int kern);
+       struct module   *owner;
+-};
++} __do_const;
+ struct iovec;
+ struct kvec;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index e8d79d4..d9519a7 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1307,6 +1307,7 @@ struct net_device_ops {
+       int                     (*ndo_xdp)(struct net_device *dev,
+                                          struct netdev_xdp *xdp);
+ };
++typedef struct net_device_ops __no_const net_device_ops_no_const;
+ /**
+  * enum net_device_priv_flags - &struct net_device priv_flags
+@@ -1636,7 +1637,7 @@ struct net_device {
+       unsigned long           base_addr;
+       int                     irq;
+-      atomic_t                carrier_changes;
++      atomic_unchecked_t      carrier_changes;
+       /*
+        *      Some hardware also needs these fields (state,dev_list,
+@@ -1676,9 +1677,9 @@ struct net_device {
+       struct net_device_stats stats;
+-      atomic_long_t           rx_dropped;
+-      atomic_long_t           tx_dropped;
+-      atomic_long_t           rx_nohandler;
++      atomic_long_unchecked_t rx_dropped;
++      atomic_long_unchecked_t tx_dropped;
++      atomic_long_unchecked_t rx_nohandler;
+ #ifdef CONFIG_WIRELESS_EXT
+       const struct iw_handler_def *wireless_handlers;
+@@ -4218,7 +4219,7 @@ static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+       return dev->priv_flags & IFF_MACSEC;
+ }
+-extern struct pernet_operations __net_initdata loopback_net_ops;
++extern struct pernet_operations __net_initconst loopback_net_ops;
+ /* Logging, debugging and troubleshooting/diagnostic helpers. */
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index 9230f9a..065b8f8 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -119,7 +119,7 @@ struct nf_sockopt_ops {
+ #endif
+       /* Use the module struct to lock set/get code in place */
+       struct module *owner;
+-};
++} __do_const;
+ /* Function to register/unregister hook points. */
+ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index 83b9a2e..5266f3b 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -104,8 +104,8 @@ struct ip_set_ext {
+ };
+ struct ip_set_counter {
+-      atomic64_t bytes;
+-      atomic64_t packets;
++      atomic64_unchecked_t bytes;
++      atomic64_unchecked_t packets;
+ };
+ struct ip_set_comment_rcu {
+@@ -297,25 +297,25 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+ static inline void
+ ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+ {
+-      atomic64_add((long long)bytes, &(counter)->bytes);
++      atomic64_add_unchecked((long long)bytes, &(counter)->bytes);
+ }
+ static inline void
+ ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+ {
+-      atomic64_add((long long)packets, &(counter)->packets);
++      atomic64_add_unchecked((long long)packets, &(counter)->packets);
+ }
+ static inline u64
+ ip_set_get_bytes(const struct ip_set_counter *counter)
+ {
+-      return (u64)atomic64_read(&(counter)->bytes);
++      return (u64)atomic64_read_unchecked(&(counter)->bytes);
+ }
+ static inline u64
+ ip_set_get_packets(const struct ip_set_counter *counter)
+ {
+-      return (u64)atomic64_read(&(counter)->packets);
++      return (u64)atomic64_read_unchecked(&(counter)->packets);
+ }
+ static inline void
+@@ -387,9 +387,9 @@ ip_set_init_counter(struct ip_set_counter *counter,
+                   const struct ip_set_ext *ext)
+ {
+       if (ext->bytes != ULLONG_MAX)
+-              atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
++              atomic64_set_unchecked(&(counter)->bytes, (long long)(ext->bytes));
+       if (ext->packets != ULLONG_MAX)
+-              atomic64_set(&(counter)->packets, (long long)(ext->packets));
++              atomic64_set_unchecked(&(counter)->packets, (long long)(ext->packets));
+ }
+ /* Netlink CB args */
+diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
+index 8d02485..a1e1aa5 100644
+--- a/include/linux/netfilter/ipset/ip_set_comment.h
++++ b/include/linux/netfilter/ipset/ip_set_comment.h
+@@ -58,8 +58,9 @@ ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+  * of the set data anymore.
+  */
+ static inline void
+-ip_set_comment_free(struct ip_set_comment *comment)
++ip_set_comment_free(void *_comment)
+ {
++      struct ip_set_comment *comment = _comment;
+       struct ip_set_comment_rcu *c;
+       c = rcu_dereference_protected(comment->c, 1);
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index 1d82dd5..d6b384c 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -19,7 +19,7 @@ struct nfnl_callback {
+                         const struct nlattr * const cda[]);
+       const struct nla_policy *policy;        /* netlink attribute policy */
+       const u_int16_t attr_count;             /* number of nlattr's */
+-};
++} __do_const;
+ struct nfnetlink_subsystem {
+       const char *name;
+diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
+new file mode 100644
+index 0000000..33f4af8
+--- /dev/null
++++ b/include/linux/netfilter/xt_gradm.h
+@@ -0,0 +1,9 @@
++#ifndef _LINUX_NETFILTER_XT_GRADM_H
++#define _LINUX_NETFILTER_XT_GRADM_H 1
++
++struct xt_gradm_mtinfo {
++      __u16 flags;
++      __u16 invflags;
++};
++
++#endif
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index da14ab6..874abff 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -150,19 +150,19 @@ struct netlink_dump_control {
+       void *data;
+       struct module *module;
+       u16 min_dump_alloc;
+-};
++} __do_const;
++typedef struct netlink_dump_control __no_const netlink_dump_control_no_const;
+ extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+-                              struct netlink_dump_control *control);
++                              struct netlink_dump_control *control,
++                              void *data,
++                              struct module *module);
+ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+                                    const struct nlmsghdr *nlh,
+                                    struct netlink_dump_control *control)
+ {
+-      if (!control->module)
+-              control->module = THIS_MODULE;
+-
+-      return __netlink_dump_start(ssk, skb, nlh, control);
++      return __netlink_dump_start(ssk, skb, nlh, control, control->data, control->module ? : THIS_MODULE);
+ }
+ struct netlink_tap {
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index 520681b..2b7fabb 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -31,7 +31,7 @@ struct nls_table {
+       const unsigned char *charset2upper;
+       struct module *owner;
+       struct nls_table *next;
+-};
++} __do_const;
+ /* this value hold the maximum octet of charset */
+ #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
+@@ -46,7 +46,7 @@ enum utf16_endian {
+ /* nls_base.c */
+ extern int __register_nls(struct nls_table *, struct module *);
+ extern int unregister_nls(struct nls_table *);
+-extern struct nls_table *load_nls(char *);
++extern struct nls_table *load_nls(const char *);
+ extern void unload_nls(struct nls_table *);
+ extern struct nls_table *load_nls_default(void);
+ #define register_nls(nls) __register_nls((nls), THIS_MODULE)
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 4149868..0971cea 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -56,7 +56,8 @@ struct notifier_block {
+       notifier_fn_t notifier_call;
+       struct notifier_block __rcu *next;
+       int priority;
+-};
++} __do_const;
++typedef struct notifier_block __no_const notifier_block_no_const;
+ struct atomic_notifier_head {
+       spinlock_t lock;
+diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
+index b2a0f15..4d7da32 100644
+--- a/include/linux/oprofile.h
++++ b/include/linux/oprofile.h
+@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
+ int oprofilefs_create_ro_ulong(struct dentry * root,
+       char const * name, ulong * val);
+  
+-/** Create a file for read-only access to an atomic_t. */
++/** Create a file for read-only access to an atomic_unchecked_t. */
+ int oprofilefs_create_ro_atomic(struct dentry * root,
+-      char const * name, atomic_t * val);
++      char const * name, atomic_unchecked_t * val);
+  
+ /** create a directory */
+ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 113ee62..70198a7 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -129,7 +129,7 @@ struct parallel_data {
+       struct padata_serial_queue      __percpu *squeue;
+       atomic_t                        reorder_objects;
+       atomic_t                        refcnt;
+-      atomic_t                        seq_nr;
++      atomic_unchecked_t              seq_nr;
+       struct padata_cpumask           cpumask;
+       spinlock_t                      lock ____cacheline_aligned;
+       unsigned int                    processed;
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 01e8443..3a4d158 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -215,7 +215,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
+                                 __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
+ }
+-typedef int filler_t(void *, struct page *);
++typedef int filler_t(struct file *, struct page *);
+ pgoff_t page_cache_next_hole(struct address_space *mapping,
+                            pgoff_t index, unsigned long max_scan);
+@@ -359,7 +359,7 @@ extern int read_cache_pages(struct address_space *mapping,
+ static inline struct page *read_mapping_page(struct address_space *mapping,
+                               pgoff_t index, void *data)
+ {
+-      filler_t *filler = (filler_t *)mapping->a_ops->readpage;
++      filler_t *filler = mapping->a_ops->readpage;
+       return read_cache_page(mapping, index, filler, data);
+ }
+diff --git a/include/linux/path.h b/include/linux/path.h
+index d137218..be0c176 100644
+--- a/include/linux/path.h
++++ b/include/linux/path.h
+@@ -1,13 +1,15 @@
+ #ifndef _LINUX_PATH_H
+ #define _LINUX_PATH_H
++#include <linux/compiler.h>
++
+ struct dentry;
+ struct vfsmount;
+ struct path {
+       struct vfsmount *mnt;
+       struct dentry *dentry;
+-};
++} __randomize_layout;
+ extern void path_get(const struct path *);
+ extern void path_put(const struct path *);
+diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
+index 8c78950..0d74ed9 100644
+--- a/include/linux/pci_hotplug.h
++++ b/include/linux/pci_hotplug.h
+@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
+       int (*get_latch_status)         (struct hotplug_slot *slot, u8 *value);
+       int (*get_adapter_status)       (struct hotplug_slot *slot, u8 *value);
+       int (*reset_slot)               (struct hotplug_slot *slot, int probe);
+-};
++} __do_const;
++typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
+ /**
+  * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index 56939d3..7fb18e3 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -28,7 +28,7 @@
+  * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
+  * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+  */
+-#define PERCPU_DYNAMIC_EARLY_SLOTS    128
++#define PERCPU_DYNAMIC_EARLY_SLOTS    256
+ #define PERCPU_DYNAMIC_EARLY_SIZE     (12 << 10)
+ /*
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 2b6b43c..7021115 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -575,8 +575,8 @@ struct perf_event {
+       enum perf_event_active_state    state;
+       unsigned int                    attach_state;
+-      local64_t                       count;
+-      atomic64_t                      child_count;
++      local64_t                       count; /* PaX: fix it one day */
++      atomic64_unchecked_t            child_count;
+       /*
+        * These are the total time in nanoseconds that the event
+@@ -627,8 +627,8 @@ struct perf_event {
+        * These accumulate total time (in nanoseconds) that children
+        * events have been enabled and running, respectively.
+        */
+-      atomic64_t                      child_total_time_enabled;
+-      atomic64_t                      child_total_time_running;
++      atomic64_unchecked_t            child_total_time_enabled;
++      atomic64_unchecked_t            child_total_time_running;
+       /*
+        * Protect attach/detach and child_list:
+@@ -1077,7 +1077,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
+ static inline u64 __perf_event_count(struct perf_event *event)
+ {
+-      return local64_read(&event->count) + atomic64_read(&event->child_count);
++      return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
+ }
+ extern void perf_event_mmap(struct vm_area_struct *vma);
+@@ -1128,7 +1128,7 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
+       }
+ }
+-extern int sysctl_perf_event_paranoid;
++extern int sysctl_perf_event_legitimately_concerned;
+ extern int sysctl_perf_event_mlock;
+ extern int sysctl_perf_event_sample_rate;
+ extern int sysctl_perf_cpu_time_max_percent;
+@@ -1145,19 +1145,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+ int perf_event_max_stack_handler(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp, loff_t *ppos);
++static inline bool perf_paranoid_any(void)
++{
++      return sysctl_perf_event_legitimately_concerned > 2;
++}
++
+ static inline bool perf_paranoid_tracepoint_raw(void)
+ {
+-      return sysctl_perf_event_paranoid > -1;
++      return sysctl_perf_event_legitimately_concerned > -1;
+ }
+ static inline bool perf_paranoid_cpu(void)
+ {
+-      return sysctl_perf_event_paranoid > 0;
++      return sysctl_perf_event_legitimately_concerned > 0;
+ }
+ static inline bool perf_paranoid_kernel(void)
+ {
+-      return sysctl_perf_event_paranoid > 1;
++      return sysctl_perf_event_legitimately_concerned > 1;
+ }
+ extern void perf_event_init(void);
+@@ -1317,7 +1322,7 @@ struct perf_pmu_events_attr {
+       struct device_attribute attr;
+       u64 id;
+       const char *event_str;
+-};
++} __do_const;
+ struct perf_pmu_events_ht_attr {
+       struct device_attribute                 attr;
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 23705a5..af2bfb4 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -169,8 +169,8 @@ static inline pid_t pid_nr(struct pid *pid)
+       return nr;
+ }
+-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+-pid_t pid_vnr(struct pid *pid);
++pid_t pid_nr_ns(const struct pid *pid, const struct pid_namespace *ns);
++pid_t pid_vnr(const struct pid *pid);
+ #define do_each_pid_task(pid, type, task)                             \
+       do {                                                            \
+diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
+index 918b117..7af374b7 100644
+--- a/include/linux/pid_namespace.h
++++ b/include/linux/pid_namespace.h
+@@ -45,7 +45,7 @@ struct pid_namespace {
+       int hide_pid;
+       int reboot;     /* group exit code if this pidns was rebooted */
+       struct ns_common ns;
+-};
++} __randomize_layout;
+ extern struct pid_namespace init_pid_ns;
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 24f5470..deb6089 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -48,10 +48,10 @@ struct pipe_inode_info {
+       struct mutex mutex;
+       wait_queue_head_t wait;
+       unsigned int nrbufs, curbuf, buffers;
+-      unsigned int readers;
+-      unsigned int writers;
+-      unsigned int files;
+-      unsigned int waiting_writers;
++      atomic_t readers;
++      atomic_t writers;
++      atomic_t files;
++      atomic_t waiting_writers;
+       unsigned int r_counter;
+       unsigned int w_counter;
+       struct page *tmp_page;
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 06eb353..dbf4a34 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -631,6 +631,7 @@ struct dev_pm_domain {
+       void (*sync)(struct device *dev);
+       void (*dismiss)(struct device *dev);
+ };
++typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
+ /*
+  * The PM_EVENT_ messages are also used by drivers implementing the legacy
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 31fec85..97f3906 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -35,7 +35,7 @@ struct gpd_dev_ops {
+       int (*start)(struct device *dev);
+       int (*stop)(struct device *dev);
+       bool (*active_wakeup)(struct device *dev);
+-};
++} __no_const;
+ struct genpd_power_state {
+       s64 power_off_latency_ns;
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 2e14d26..aed7c63 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -116,7 +116,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
+ static inline void pm_runtime_mark_last_busy(struct device *dev)
+ {
+-      ACCESS_ONCE(dev->power.last_busy) = jiffies;
++      ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
+ }
+ static inline bool pm_runtime_is_irq_safe(struct device *dev)
+diff --git a/include/linux/pnp.h b/include/linux/pnp.h
+index 2588ca6..b705409 100644
+--- a/include/linux/pnp.h
++++ b/include/linux/pnp.h
+@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
+ struct pnp_fixup {
+       char id[7];
+       void (*quirk_function) (struct pnp_dev * dev);  /* fixup function */
+-};
++} __do_const;
+ /* config parameters */
+ #define PNP_CONFIG_NORMAL     0x0001
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 51334ed..7fda393 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -19,8 +19,8 @@
+  * under normal circumstances, used to verify that nobody uses
+  * non-initialized list entries.
+  */
+-#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
++#define LIST_POISON1  ((void *) (long)0xFFFFFF02)
++#define LIST_POISON2  ((void *) (long)0xFFFFFF04)
+ /********** include/linux/timer.h **********/
+ /*
+diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
+index d8b187c3..9a9257a 100644
+--- a/include/linux/power/smartreflex.h
++++ b/include/linux/power/smartreflex.h
+@@ -238,7 +238,7 @@ struct omap_sr_class_data {
+       int (*notify)(struct omap_sr *sr, u32 status);
+       u8 notify_flags;
+       u8 class_type;
+-};
++} __do_const;
+ /**
+  * struct omap_sr_nvalue_table        - Smartreflex n-target value info
+diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
+index 4ea1d37..80f4b33 100644
+--- a/include/linux/ppp-comp.h
++++ b/include/linux/ppp-comp.h
+@@ -84,7 +84,7 @@ struct compressor {
+       struct module *owner;
+       /* Extra skb space needed by the compressor algorithm */
+       unsigned int comp_extra;
+-};
++} __do_const;
+ /*
+  * The return value from decompress routine is the length of the
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 75e4e30..fcfde15 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -134,11 +134,16 @@ extern void preempt_count_sub(int val);
+ #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
+ #endif
++#define raw_preempt_count_add(val)    __preempt_count_add(val)
++#define raw_preempt_count_sub(val)    __preempt_count_sub(val)
++
+ #define __preempt_count_inc() __preempt_count_add(1)
+ #define __preempt_count_dec() __preempt_count_sub(1)
+ #define preempt_count_inc() preempt_count_add(1)
++#define raw_preempt_count_inc() raw_preempt_count_add(1)
+ #define preempt_count_dec() preempt_count_sub(1)
++#define raw_preempt_count_dec() raw_preempt_count_sub(1)
+ #ifdef CONFIG_PREEMPT_COUNT
+@@ -148,6 +153,12 @@ do { \
+       barrier(); \
+ } while (0)
++#define raw_preempt_disable() \
++do { \
++      raw_preempt_count_inc(); \
++      barrier(); \
++} while (0)
++
+ #define sched_preempt_enable_no_resched() \
+ do { \
+       barrier(); \
+@@ -156,6 +167,12 @@ do { \
+ #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#define raw_preempt_enable_no_resched() \
++do { \
++      barrier(); \
++      raw_preempt_count_dec(); \
++} while (0)
++
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+ #ifdef CONFIG_PREEMPT
+@@ -216,8 +233,10 @@ do { \
+  * region.
+  */
+ #define preempt_disable()                     barrier()
++#define raw_preempt_disable()                 barrier()
+ #define sched_preempt_enable_no_resched()     barrier()
+ #define preempt_enable_no_resched()           barrier()
++#define raw_preempt_enable_no_resched()               barrier()
+ #define preempt_enable()                      barrier()
+ #define preempt_check_resched()                       do { } while (0)
+@@ -232,11 +251,13 @@ do { \
+ /*
+  * Modules have no business playing preemption tricks.
+  */
++#ifndef CONFIG_PAX_KERNEXEC
+ #undef sched_preempt_enable_no_resched
+ #undef preempt_enable_no_resched
+ #undef preempt_enable_no_resched_notrace
+ #undef preempt_check_resched
+ #endif
++#endif
+ #define preempt_set_need_resched() \
+ do { \
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 696a56b..c7cff38 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -43,7 +43,7 @@ static inline const char *printk_skip_level(const char *buffer)
+ #define CONSOLE_LOGLEVEL_DEBUG        10 /* issue debug messages */
+ #define CONSOLE_LOGLEVEL_MOTORMOUTH 15        /* You can't shut this one up */
+-extern int console_printk[];
++extern int console_printk[4];
+ #define console_loglevel (console_printk[0])
+ #define default_message_loglevel (console_printk[1])
+@@ -144,6 +144,8 @@ static inline void printk_nmi_flush(void) { }
+ static inline void printk_nmi_flush_on_panic(void) { }
+ #endif /* PRINTK_NMI */
++extern int kptr_restrict;
++
+ #ifdef CONFIG_PRINTK
+ asmlinkage __printf(5, 0)
+ int vprintk_emit(int facility, int level,
+@@ -171,14 +173,13 @@ __printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+  * with all other unrelated printk_ratelimit() callsites.  Instead use
+  * printk_ratelimited() or plain old __ratelimit().
+  */
+-extern int __printk_ratelimit(const char *func);
++extern int __printk_ratelimit(const char *func) __nocapture(1);
+ #define printk_ratelimit() __printk_ratelimit(__func__)
+ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+                                  unsigned int interval_msec);
+ extern int printk_delay_msec;
+ extern int dmesg_restrict;
+-extern int kptr_restrict;
+ extern int
+ devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index b97bf2e..f14c92d4 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
+ extern struct proc_dir_entry *proc_symlink(const char *,
+               struct proc_dir_entry *, const char *);
+ extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
++extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
+ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
+                                             struct proc_dir_entry *, void *);
++extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
++                                            struct proc_dir_entry *, void *);
+ extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
+                                             struct proc_dir_entry *);
+  
+@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
+       return proc_create_data(name, mode, parent, proc_fops, NULL);
+ }
++static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
++      struct proc_dir_entry *parent, const struct file_operations *proc_fops)
++{
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
++#else
++      return proc_create_data(name, mode, parent, proc_fops, NULL);
++#endif
++}
++
++
+ extern void proc_set_size(struct proc_dir_entry *, loff_t);
+ extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
+ extern void *PDE_DATA(const struct inode *);
+@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
+               struct proc_dir_entry *parent,const char *dest) { return NULL;}
+ static inline struct proc_dir_entry *proc_mkdir(const char *name,
+       struct proc_dir_entry *parent) {return NULL;}
++static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
++      struct proc_dir_entry *parent) { return NULL; }
+ static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
+       umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
++static inline  struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
++      umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
+ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
+       umode_t mode, struct proc_dir_entry *parent) { return NULL; }
+ #define proc_create(name, mode, parent, proc_fops) ({NULL;})
+@@ -79,7 +99,7 @@ struct net;
+ static inline struct proc_dir_entry *proc_net_mkdir(
+       struct net *net, const char *name, struct proc_dir_entry *parent)
+ {
+-      return proc_mkdir_data(name, 0, parent, net);
++      return proc_mkdir_data_restrict(name, 0, parent, net);
+ }
+ #endif /* _LINUX_PROC_FS_H */
+diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
+index de0e771..9e746e9 100644
+--- a/include/linux/proc_ns.h
++++ b/include/linux/proc_ns.h
+@@ -18,7 +18,7 @@ struct proc_ns_operations {
+       struct ns_common *(*get)(struct task_struct *task);
+       void (*put)(struct ns_common *ns);
+       int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
+-};
++} __do_const __randomize_layout;
+ extern const struct proc_ns_operations netns_operations;
+ extern const struct proc_ns_operations utsns_operations;
+diff --git a/include/linux/psci.h b/include/linux/psci.h
+index bdea1cb..a094b75 100644
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -33,7 +33,7 @@ struct psci_operations {
+       int (*affinity_info)(unsigned long target_affinity,
+                       unsigned long lowest_affinity_level);
+       int (*migrate_info_type)(void);
+-};
++} __no_const;
+ extern struct psci_operations psci_ops;
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 55107a8..eb06178 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -76,7 +76,7 @@ struct kqid {                        /* Type in which we store the quota identifier */
+ extern bool qid_eq(struct kqid left, struct kqid right);
+ extern bool qid_lt(struct kqid left, struct kqid right);
+-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
++extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
+ extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
+ extern bool qid_valid(struct kqid qid);
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 3d6e981..4925f17 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -18,9 +18,19 @@ struct random_ready_callback {
+ };
+ extern void add_device_randomness(const void *, unsigned int);
++
++#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
++static inline void add_latent_entropy(void)
++{
++      add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++}
++#else
++static inline void add_latent_entropy(void) {}
++#endif
++
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+-                               unsigned int value);
+-extern void add_interrupt_randomness(int irq, int irq_flags);
++                               unsigned int value) __latent_entropy;
++extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern int add_random_ready_callback(struct random_ready_callback *rdy);
+@@ -52,6 +62,11 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+ #define prandom_init_once(pcpu_state)                 \
+       DO_ONCE(prandom_seed_full_state, (pcpu_state))
++static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
++{
++      return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
++}
++
+ /**
+  * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+  * @ep_ro: right open interval endpoint
+@@ -64,7 +79,7 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+  *
+  * Returns: pseudo-random number in interval [0, ep_ro)
+  */
+-static inline u32 prandom_u32_max(u32 ep_ro)
++static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
+ {
+       return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+ }
+diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
+index 57c9e06..fe14126 100644
+--- a/include/linux/ratelimit.h
++++ b/include/linux/ratelimit.h
+@@ -72,7 +72,8 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
+ extern struct ratelimit_state printk_ratelimit_state;
+-extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
++extern __nocapture(2)
++int ___ratelimit(struct ratelimit_state *rs, const char *func);
+ #define __ratelimit(state) ___ratelimit(state, __func__)
+ #ifdef CONFIG_PRINTK
+diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
+index d076183..9702b6e 100644
+--- a/include/linux/rbtree_augmented.h
++++ b/include/linux/rbtree_augmented.h
+@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)    \
+       old->rbaugmented = rbcompute(old);                              \
+ }                                                                     \
+ rbstatic const struct rb_augment_callbacks rbname = {                 \
+-      rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \
++      .propagate = rbname ## _propagate,                              \
++      .copy = rbname ## _copy,                                        \
++      .rotate = rbname ## _rotate                                     \
+ };
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index 8beb98d..c515d45 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
+                   struct list_head *prev, struct list_head *next);
+ #endif
++void __pax_list_add_rcu(struct list_head *new,
++                      struct list_head *prev, struct list_head *next);
++
+ /**
+  * list_add_rcu - add a new entry to rcu-protected list
+  * @new: new entry to be added
+@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+       __list_add_rcu(new, head, head->next);
+ }
++static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
++{
++      __pax_list_add_rcu(new, head, head->next);
++}
++
+ /**
+  * list_add_tail_rcu - add a new entry to rcu-protected list
+  * @new: new entry to be added
+@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
+       __list_add_rcu(new, head->prev, head);
+ }
++static inline void pax_list_add_tail_rcu(struct list_head *new,
++                                      struct list_head *head)
++{
++      __pax_list_add_rcu(new, head->prev, head);
++}
++
+ /**
+  * list_del_rcu - deletes entry from list without re-initialization
+  * @entry: the element to delete from the list.
+@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
+       entry->prev = LIST_POISON2;
+ }
++extern void pax_list_del_rcu(struct list_head *entry);
++
+ /**
+  * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+  * @n: the element to delete from the hash list.
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 1aa62e1..8f67337 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -863,6 +863,7 @@ static inline void rcu_preempt_sleep_check(void)
+  * read-side critical sections may be preempted and they may also block, but
+  * only when acquiring spinlocks that are subject to priority inheritance.
+  */
++static inline void rcu_read_lock(void) __acquires(RCU);
+ static inline void rcu_read_lock(void)
+ {
+       __rcu_read_lock();
+@@ -917,6 +918,7 @@ static inline void rcu_read_lock(void)
+  *
+  * See rcu_read_lock() for more information.
+  */
++static inline void rcu_read_unlock(void) __releases(RCU);
+ static inline void rcu_read_unlock(void)
+ {
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+@@ -943,6 +945,7 @@ static inline void rcu_read_unlock(void)
+  * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
+  * was invoked from some other task.
+  */
++static inline void rcu_read_lock_bh(void) __acquires(RCU_BH);
+ static inline void rcu_read_lock_bh(void)
+ {
+       local_bh_disable();
+@@ -957,6 +960,7 @@ static inline void rcu_read_lock_bh(void)
+  *
+  * See rcu_read_lock_bh() for more information.
+  */
++static inline void rcu_read_unlock_bh(void) __releases(RCU_BH);
+ static inline void rcu_read_unlock_bh(void)
+ {
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+@@ -979,6 +983,7 @@ static inline void rcu_read_unlock_bh(void)
+  * rcu_read_unlock_sched() from process context if the matching
+  * rcu_read_lock_sched() was invoked from an NMI handler.
+  */
++static inline void rcu_read_lock_sched(void) __acquires(RCU_SCHED);
+ static inline void rcu_read_lock_sched(void)
+ {
+       preempt_disable();
+@@ -989,6 +994,7 @@ static inline void rcu_read_lock_sched(void)
+ }
+ /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
++static inline notrace void rcu_read_lock_sched_notrace(void) __acquires(RCU_SCHED);
+ static inline notrace void rcu_read_lock_sched_notrace(void)
+ {
+       preempt_disable_notrace();
+@@ -1000,6 +1006,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
+  *
+  * See rcu_read_lock_sched for more information.
+  */
++static inline void rcu_read_unlock_sched(void) __releases(RCU_SCHED);
+ static inline void rcu_read_unlock_sched(void)
+ {
+       RCU_LOCKDEP_WARN(!rcu_is_watching(),
+@@ -1010,6 +1017,7 @@ static inline void rcu_read_unlock_sched(void)
+ }
+ /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
++static inline notrace void rcu_read_unlock_sched_notrace(void) __releases(RCU_SCHED);
+ static inline notrace void rcu_read_unlock_sched_notrace(void)
+ {
+       __release(RCU_SCHED);
+diff --git a/include/linux/reboot.h b/include/linux/reboot.h
+index a7ff409..03e2fa8 100644
+--- a/include/linux/reboot.h
++++ b/include/linux/reboot.h
+@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
+  */
+ extern void migrate_to_reboot_cpu(void);
+-extern void machine_restart(char *cmd);
+-extern void machine_halt(void);
+-extern void machine_power_off(void);
++extern void machine_restart(char *cmd) __noreturn;
++extern void machine_halt(void) __noreturn;
++extern void machine_power_off(void) __noreturn;
+ extern void machine_shutdown(void);
+ struct pt_regs;
+@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
+  */
+ extern void kernel_restart_prepare(char *cmd);
+-extern void kernel_restart(char *cmd);
+-extern void kernel_halt(void);
+-extern void kernel_power_off(void);
++extern void kernel_restart(char *cmd) __noreturn;
++extern void kernel_halt(void) __noreturn;
++extern void kernel_power_off(void) __noreturn;
+ extern int C_A_D; /* for sysctl */
+ void ctrl_alt_del(void);
+@@ -77,7 +77,7 @@ extern void orderly_reboot(void);
+  * Emergency restart, callable from an interrupt handler.
+  */
+-extern void emergency_restart(void);
++extern void emergency_restart(void) __noreturn;
+ #include <asm/emergency-restart.h>
+ #endif /* _LINUX_REBOOT_H */
+diff --git a/include/linux/regset.h b/include/linux/regset.h
+index 8e0c9fe..ac4d221 100644
+--- a/include/linux/regset.h
++++ b/include/linux/regset.h
+@@ -161,7 +161,8 @@ struct user_regset {
+       unsigned int                    align;
+       unsigned int                    bias;
+       unsigned int                    core_note_type;
+-};
++} __do_const;
++typedef struct user_regset __no_const user_regset_no_const;
+ /**
+  * struct user_regset_view - available regsets
+diff --git a/include/linux/relay.h b/include/linux/relay.h
+index d7c8359..818daf5 100644
+--- a/include/linux/relay.h
++++ b/include/linux/relay.h
+@@ -157,7 +157,7 @@ struct rchan_callbacks
+        * The callback should return 0 if successful, negative if not.
+        */
+       int (*remove_buf_file)(struct dentry *dentry);
+-};
++} __no_const;
+ /*
+  * CONFIG_RELAY kernel API, kernel/relay.c
+diff --git a/include/linux/rio.h b/include/linux/rio.h
+index 37b95c4..2457ca92 100644
+--- a/include/linux/rio.h
++++ b/include/linux/rio.h
+@@ -429,7 +429,7 @@ struct rio_ops {
+       int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart,
+                       u32 size, u32 flags, dma_addr_t *laddr);
+       void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart);
+-};
++} __no_const;
+ #define RIO_RESOURCE_MEM      0x00000100
+ #define RIO_RESOURCE_DOORBELL 0x00000200
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index b46bb56..f5a4748 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -139,8 +139,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
+ void anon_vma_init(void);     /* create anon_vma_cachep */
+ int  anon_vma_prepare(struct vm_area_struct *);
+ void unlink_anon_vmas(struct vm_area_struct *);
+-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
+ static inline void anon_vma_merge(struct vm_area_struct *vma,
+                                 struct vm_area_struct *next)
+diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
+index cb3c8fe..85365ba 100644
+--- a/include/linux/scatterlist.h
++++ b/include/linux/scatterlist.h
+@@ -1,6 +1,7 @@
+ #ifndef _LINUX_SCATTERLIST_H
+ #define _LINUX_SCATTERLIST_H
++#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/bug.h>
+@@ -136,10 +137,17 @@ static inline struct page *sg_page(struct scatterlist *sg)
+ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
+                             unsigned int buflen)
+ {
++      const void *realbuf = buf;
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      if (object_starts_on_stack(buf))
++              realbuf = buf - current->stack + current->lowmem_stack;
++#endif
++
+ #ifdef CONFIG_DEBUG_SG
+-      BUG_ON(!virt_addr_valid(buf));
++      BUG_ON(!virt_addr_valid(realbuf));
+ #endif
+-      sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
++      sg_set_page(sg, virt_to_page(realbuf), buflen, offset_in_page(realbuf));
+ }
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 62c68e5..7058558 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -7,7 +7,7 @@
+ struct sched_param {
+-      int sched_priority;
++      unsigned int sched_priority;
+ };
+ #include <asm/param.h>        /* for HZ */
+@@ -134,6 +134,7 @@ struct perf_event_context;
+ struct blk_plug;
+ struct filename;
+ struct nameidata;
++struct linux_binprm;
+ #define VMACACHE_BITS 2
+ #define VMACACHE_SIZE (1U << VMACACHE_BITS)
+@@ -452,6 +453,18 @@ struct nsproxy;
+ struct user_namespace;
+ #ifdef CONFIG_MMU
++
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
++#else
++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++      return 0;
++}
++#endif
++
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
++
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+@@ -791,6 +804,17 @@ struct signal_struct {
+ #ifdef CONFIG_TASKSTATS
+       struct taskstats *stats;
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++      u32 curr_ip;
++      u32 saved_ip;
++      u32 gr_saddr;
++      u32 gr_daddr;
++      u16 gr_sport;
++      u16 gr_dport;
++      u8 used_accept:1;
++#endif
++
+ #ifdef CONFIG_AUDIT
+       unsigned audit_tty;
+       struct tty_audit_buf *tty_audit_buf;
+@@ -808,7 +832,7 @@ struct signal_struct {
+       struct mutex cred_guard_mutex;  /* guard against foreign influences on
+                                        * credential calculations
+                                        * (notably. ptrace) */
+-};
++} __randomize_layout;
+ /*
+  * Bits in flags field of signal_struct.
+@@ -863,6 +887,14 @@ struct user_struct {
+       struct key *session_keyring;    /* UID's default session keyring */
+ #endif
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++      unsigned char kernel_banned;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++      unsigned char sugid_banned;
++      unsigned long sugid_ban_expires;
++#endif
++
+       /* Hash table maintenance information */
+       struct hlist_node uidhash_node;
+       kuid_t uid;
+@@ -870,7 +902,7 @@ struct user_struct {
+ #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
+       atomic_long_t locked_vm;
+ #endif
+-};
++} __randomize_layout;
+ extern int uids_sysfs_init(void);
+@@ -1460,6 +1492,9 @@ struct tlbflush_unmap_batch {
+ struct task_struct {
+       volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
+       void *stack;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      void *lowmem_stack;
++#endif
+       atomic_t usage;
+       unsigned int flags;     /* per process flags, defined below */
+       unsigned int ptrace;
+@@ -1599,8 +1634,8 @@ struct task_struct {
+       struct list_head thread_node;
+       struct completion *vfork_done;          /* for vfork() */
+-      int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
+-      int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
++      pid_t __user *set_child_tid;            /* CLONE_CHILD_SETTID */
++      pid_t __user *clear_child_tid;          /* CLONE_CHILD_CLEARTID */
+       cputime_t utime, stime, utimescaled, stimescaled;
+       cputime_t gtime;
+@@ -1630,11 +1665,6 @@ struct task_struct {
+       struct task_cputime cputime_expires;
+       struct list_head cpu_timers[3];
+-/* process credentials */
+-      const struct cred __rcu *real_cred; /* objective and real subjective task
+-                                       * credentials (COW) */
+-      const struct cred __rcu *cred;  /* effective (overridable) subjective task
+-                                       * credentials (COW) */
+       char comm[TASK_COMM_LEN]; /* executable name excluding path
+                                    - access with [gs]et_task_comm (which lock
+                                      it with task_lock())
+@@ -1650,6 +1680,8 @@ struct task_struct {
+ /* hung task detection */
+       unsigned long last_switch_count;
+ #endif
++/* CPU-specific state of this task */
++      struct thread_struct thread;
+ /* filesystem information */
+       struct fs_struct *fs;
+ /* open file information */
+@@ -1660,8 +1692,11 @@ struct task_struct {
+       struct signal_struct *signal;
+       struct sighand_struct *sighand;
+-      sigset_t blocked, real_blocked;
+-      sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
++      sigset_t real_blocked;
++      struct {
++              sigset_t blocked;
++              sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
++      };
+       struct sigpending pending;
+       unsigned long sas_ss_sp;
+@@ -1728,6 +1763,10 @@ struct task_struct {
+       unsigned int in_ubsan;
+ #endif
++/* process credentials */
++      const struct cred __rcu *real_cred; /* objective and real subjective task
++                                       * credentials (COW) */
++
+ /* journalling filesystem info */
+       void *journal_info;
+@@ -1766,6 +1805,10 @@ struct task_struct {
+       /* cg_list protected by css_set_lock and tsk->alloc_lock */
+       struct list_head cg_list;
+ #endif
++
++      const struct cred __rcu *cred;  /* effective (overridable) subjective task
++                                       * credentials (COW) */
++
+ #ifdef CONFIG_FUTEX
+       struct robust_list_head __user *robust_list;
+ #ifdef CONFIG_COMPAT
+@@ -1881,7 +1924,7 @@ struct task_struct {
+        * Number of functions that haven't been traced
+        * because of depth overrun.
+        */
+-      atomic_t trace_overrun;
++      atomic_unchecked_t trace_overrun;
+       /* Pause for the tracing */
+       atomic_t tracing_graph_pause;
+ #endif
+@@ -1923,22 +1966,93 @@ struct task_struct {
+ #ifdef CONFIG_MMU
+       struct task_struct *oom_reaper_list;
+ #endif
+-/* CPU-specific state of this task */
+-      struct thread_struct thread;
+-/*
+- * WARNING: on x86, 'thread_struct' contains a variable-sized
+- * structure.  It *MUST* be at the end of 'task_struct'.
+- *
+- * Do not put anything below here!
+- */
+-};
++
++#ifdef CONFIG_GRKERNSEC
++      /* grsecurity */
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      u64 exec_id;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++      const struct cred *delayed_cred;
++#endif
++      struct dentry *gr_chroot_dentry;
++      struct acl_subject_label *acl;
++      struct acl_subject_label *tmpacl;
++      struct acl_role_label *role;
++      struct file *exec_file;
++      unsigned long brute_expires;
++      u16 acl_role_id;
++      u8 inherited;
++      /* is this the task that authenticated to the special role */
++      u8 acl_sp_role;
++      u8 is_writable;
++      u8 brute;
++      u8 gr_is_chrooted;
++#endif
++
++/* thread_info moved to task_struct */
++#ifdef CONFIG_X86
++      struct thread_info tinfo;
++#endif
++} __randomize_layout;
+ #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+-extern int arch_task_struct_size __read_mostly;
++extern size_t arch_task_struct_size __read_mostly;
+ #else
+ # define arch_task_struct_size (sizeof(struct task_struct))
+ #endif
++#define MF_PAX_PAGEEXEC               0x01000000      /* Paging based non-executable pages */
++#define MF_PAX_EMUTRAMP               0x02000000      /* Emulate trampolines */
++#define MF_PAX_MPROTECT               0x04000000      /* Restrict mprotect() */
++#define MF_PAX_RANDMMAP               0x08000000      /* Randomize mmap() base */
++/*#define MF_PAX_RANDEXEC             0x10000000*/    /* Randomize ET_EXEC base */
++#define MF_PAX_SEGMEXEC               0x20000000      /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++extern int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++#define PAX_PARSE_FLAGS_FALLBACK      (~0UL)
++
++/* if tsk != current then task_lock must be held on it */
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline unsigned long pax_get_flags(struct task_struct *tsk)
++{
++      if (likely(tsk->mm))
++              return tsk->mm->pax_flags;
++      else
++              return 0UL;
++}
++
++/* if tsk != current then task_lock must be held on it */
++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
++{
++      if (likely(tsk->mm)) {
++              tsk->mm->pax_flags = flags;
++              return 0;
++      }
++      return -EINVAL;
++}
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_initial_flags(struct linux_binprm *bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++#endif
++
++#ifdef CONFIG_PAX_SIZE_OVERFLOW
++extern bool pax_size_overflow_report_only;
++#endif
++
++struct path;
++extern char *pax_get_path(const struct path *path, char *buf, int buflen);
++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_refcount_error(struct pt_regs *regs, const char *kind);
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+@@ -2051,7 +2165,7 @@ struct pid_namespace;
+ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+                       struct pid_namespace *ns);
+-static inline pid_t task_pid_nr(struct task_struct *tsk)
++static inline pid_t task_pid_nr(const struct task_struct *tsk)
+ {
+       return tsk->pid;
+ }
+@@ -2418,6 +2532,48 @@ extern u64 sched_clock_cpu(int cpu);
+ extern void sched_clock_init(void);
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static inline void populate_stack(void *stack, unsigned int size)
++{
++      int c;
++      int *ptr = stack;
++      int *end = stack + size;
++
++      while (ptr < end) {
++              c = *(volatile int *)ptr;
++              (void)c;
++              ptr += PAGE_SIZE/sizeof(int);
++      }
++}
++#else
++static inline void populate_stack(void *stack, unsigned int size)
++{
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++static inline bool current_is_ptracer(struct task_struct *task, u64 *exec_id)
++{
++      bool ret = false;
++        if (!task->ptrace)
++              return ret;
++
++      rcu_read_lock();
++      read_lock(&tasklist_lock);
++      if (task->parent && task->parent == current) {
++              ret = true;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++              if (exec_id)
++                      *exec_id = task->parent->exec_id;
++#endif
++      }
++      read_unlock(&tasklist_lock);
++      rcu_read_unlock();
++
++      return ret;
++}
++#endif
++
+ #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+ static inline void sched_clock_tick(void)
+ {
+@@ -2573,7 +2729,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
+ void yield(void);
+ union thread_union {
++#ifndef CONFIG_X86
+       struct thread_info thread_info;
++#endif
+       unsigned long stack[THREAD_SIZE/sizeof(long)];
+ };
+@@ -2606,6 +2764,7 @@ extern struct pid_namespace init_pid_ns;
+  */
+ extern struct task_struct *find_task_by_vpid(pid_t nr);
++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
+ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+               struct pid_namespace *ns);
+@@ -2637,7 +2796,7 @@ extern void proc_caches_init(void);
+ extern void flush_signals(struct task_struct *);
+ extern void ignore_signals(struct task_struct *);
+ extern void flush_signal_handlers(struct task_struct *, int force_default);
+-extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
++extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) __must_hold(&tsk->sighand->siglock);
+ static inline int kernel_dequeue_signal(siginfo_t *info)
+ {
+@@ -2889,7 +3048,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+ extern void exit_itimers(struct signal_struct *);
+ extern void flush_itimer_signals(void);
+-extern void do_group_exit(int);
++extern __noreturn void do_group_exit(int);
+ extern int do_execve(struct filename *,
+                    const char __user * const __user *,
+@@ -3004,11 +3163,13 @@ static inline int thread_group_empty(struct task_struct *p)
+  * It must not be nested with write_lock_irq(&tasklist_lock),
+  * neither inside nor outside.
+  */
++static inline void task_lock(struct task_struct *p) __acquires(&p->alloc_lock);
+ static inline void task_lock(struct task_struct *p)
+ {
+       spin_lock(&p->alloc_lock);
+ }
++static inline void task_unlock(struct task_struct *p) __releases(&p->alloc_lock);
+ static inline void task_unlock(struct task_struct *p)
+ {
+       spin_unlock(&p->alloc_lock);
+@@ -3094,9 +3255,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+ #define task_stack_end_corrupted(task) \
+               (*(end_of_stack(task)) != STACK_END_MAGIC)
+-static inline int object_is_on_stack(void *obj)
++static inline int object_starts_on_stack(const void *obj)
+ {
+-      void *stack = task_stack_page(current);
++      const void *stack = task_stack_page(current);
+       return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
+@@ -3473,7 +3634,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
+ struct update_util_data {
+       void (*func)(struct update_util_data *data,
+                    u64 time, unsigned long util, unsigned long max);
+-};
++} __no_const;
+ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+                       void (*func)(struct update_util_data *data, u64 time,
+diff --git a/include/linux/scif.h b/include/linux/scif.h
+index 49a35d6..c6209dd 100644
+--- a/include/linux/scif.h
++++ b/include/linux/scif.h
+@@ -156,7 +156,7 @@ struct scif_client {
+       void (*probe)(struct scif_peer_dev *spdev);
+       void (*remove)(struct scif_peer_dev *spdev);
+       struct subsys_interface si;
+-};
++} __do_const;
+ #define SCIF_OPEN_FAILED ((scif_epd_t)-1)
+ #define SCIF_REGISTER_FAILED ((off_t)-1)
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 7831cd5..9e82896 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -30,6 +30,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/fs.h>
++#include <linux/grsecurity.h>
+ struct linux_binprm;
+ struct cred;
+diff --git a/include/linux/sem.h b/include/linux/sem.h
+index d0efd6e..c68948c 100644
+--- a/include/linux/sem.h
++++ b/include/linux/sem.h
+@@ -22,7 +22,7 @@ struct sem_array {
+       int                     sem_nsems;      /* no. of semaphores in array */
+       int                     complex_count;  /* pending complex operations */
+       bool                    complex_mode;   /* no parallel simple ops */
+-};
++} __randomize_layout;
+ #ifdef CONFIG_SYSVIPC
+diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
+index dc368b8..e895209 100644
+--- a/include/linux/semaphore.h
++++ b/include/linux/semaphore.h
+@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
+ }
+ extern void down(struct semaphore *sem);
+-extern int __must_check down_interruptible(struct semaphore *sem);
++extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
+ extern int __must_check down_killable(struct semaphore *sem);
+ extern int __must_check down_trylock(struct semaphore *sem);
+ extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
+diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
+index fb7eb9c..fcfd102 100644
+--- a/include/linux/seq_buf.h
++++ b/include/linux/seq_buf.h
+@@ -16,7 +16,7 @@
+  * @readpos:  The next position to read in the buffer.
+  */
+ struct seq_buf {
+-      char                    *buffer;
++      unsigned char           *buffer;
+       size_t                  size;
+       size_t                  len;
+       loff_t                  readpos;
+@@ -78,7 +78,7 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
+  * Return the number of bytes available in the buffer, or zero if
+  * there's no space.
+  */
+-static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
++static inline size_t seq_buf_get_buf(struct seq_buf *s, unsigned char **bufp)
+ {
+       WARN_ON(s->len > s->size + 1);
+diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
+index f3d45dd..4539816 100644
+--- a/include/linux/seq_file.h
++++ b/include/linux/seq_file.h
+@@ -25,6 +25,9 @@ struct seq_file {
+       const struct seq_operations *op;
+       int poll_event;
+       const struct file *file;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      u64 exec_id;
++#endif
+       void *private;
+ };
+@@ -34,6 +37,7 @@ struct seq_operations {
+       void * (*next) (struct seq_file *m, void *v, loff_t *pos);
+       int (*show) (struct seq_file *m, void *v);
+ };
++typedef struct seq_operations __no_const seq_operations_no_const;
+ #define SEQ_SKIP 1
+@@ -106,6 +110,7 @@ void seq_pad(struct seq_file *m, char c);
+ char *mangle_path(char *s, const char *p, const char *esc);
+ int seq_open(struct file *, const struct seq_operations *);
++int seq_open_restrict(struct file *, const struct seq_operations *);
+ ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ loff_t seq_lseek(struct file *, loff_t, int);
+ int seq_release(struct inode *, struct file *);
+@@ -133,6 +138,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
+                 const struct path *root, const char *esc);
+ int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
++int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
+ int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
+ int single_release(struct inode *, struct file *);
+ void *__seq_open_private(struct file *, const struct seq_operations *, int);
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index ead9765..2379f94 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -443,42 +443,49 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+  * Acts like a normal spin_lock/unlock.
+  * Don't need preempt_disable() because that is in the spin_lock already.
+  */
++static inline void write_seqlock(seqlock_t *sl) __acquires(sl);
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+       spin_lock(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+ }
++static inline void write_sequnlock(seqlock_t *sl) __releases(sl);
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock(&sl->lock);
+ }
++static inline void write_seqlock_bh(seqlock_t *sl) __acquires(sl);
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+       spin_lock_bh(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+ }
++static inline void write_sequnlock_bh(seqlock_t *sl) __releases(sl);
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_bh(&sl->lock);
+ }
++static inline void write_seqlock_irq(seqlock_t *sl) __acquires(sl);
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+       spin_lock_irq(&sl->lock);
+       write_seqcount_begin(&sl->seqcount);
+ }
++static inline void write_sequnlock_irq(seqlock_t *sl) __releases(sl);
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+       write_seqcount_end(&sl->seqcount);
+       spin_unlock_irq(&sl->lock);
+ }
++static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) __acquires(sl);
+ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ {
+       unsigned long flags;
+@@ -491,6 +498,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ #define write_seqlock_irqsave(lock, flags)                            \
+       do { flags = __write_seqlock_irqsave(lock); } while (0)
++static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) __releases(sl);
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+@@ -503,11 +511,13 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+  * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
+  * Don't need preempt_disable() because that is in the spin_lock already.
+  */
++static inline void read_seqlock_excl(seqlock_t *sl) __acquires(sl);
+ static inline void read_seqlock_excl(seqlock_t *sl)
+ {
+       spin_lock(&sl->lock);
+ }
++static inline void read_sequnlock_excl(seqlock_t *sl) __releases(sl);
+ static inline void read_sequnlock_excl(seqlock_t *sl)
+ {
+       spin_unlock(&sl->lock);
+diff --git a/include/linux/shm.h b/include/linux/shm.h
+index 04e8818..af85805 100644
+--- a/include/linux/shm.h
++++ b/include/linux/shm.h
+@@ -22,7 +22,11 @@ struct shmid_kernel /* private to the kernel */
+       /* The task created the shm object.  NULL if the task is dead. */
+       struct task_struct      *shm_creator;
+       struct list_head        shm_clist;      /* list by creator */
+-};
++#ifdef CONFIG_GRKERNSEC
++      u64                     shm_createtime;
++      pid_t                   shm_lapid;
++#endif
++} __randomize_layout;
+ /* shm_mode upper byte flags */
+ #define       SHM_DEST        01000   /* segment will be destroyed on last detach */
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index b63f63e..fe39718 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -303,7 +303,7 @@ static inline void allow_signal(int sig)
+        * know it'll be handled, so that they don't get converted to
+        * SIGKILL or just silently dropped.
+        */
+-      kernel_sigaction(sig, (__force __sighandler_t)2);
++      kernel_sigaction(sig, (__force_user __sighandler_t)2);
+ }
+ static inline void disallow_signal(int sig)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 0f665cb..fa26c21 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -906,7 +906,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+                           int node);
+ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ struct sk_buff *build_skb(void *data, unsigned int frag_size);
+-static inline struct sk_buff *alloc_skb(unsigned int size,
++static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
+                                       gfp_t priority)
+ {
+       return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
+@@ -2215,7 +2215,7 @@ static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+       return skb->head + skb->csum_start;
+ }
+-static inline int skb_transport_offset(const struct sk_buff *skb)
++static inline int __intentional_overflow(0) skb_transport_offset(const struct sk_buff *skb)
+ {
+       return skb_transport_header(skb) - skb->data;
+ }
+@@ -2230,7 +2230,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+       return skb->inner_transport_header - skb->inner_network_header;
+ }
+-static inline int skb_network_offset(const struct sk_buff *skb)
++static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
+ {
+       return skb_network_header(skb) - skb->data;
+ }
+@@ -2290,7 +2290,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+  */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD   max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD   max(_AC(32,UL), L1_CACHE_BYTES)
+ #endif
+ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+@@ -2997,9 +2997,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+                                 int *err);
+ unsigned int datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
+-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
++int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
+                          struct iov_iter *to, int size);
+-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
++static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
+                                       struct msghdr *msg, int size)
+ {
+       return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
+@@ -3536,6 +3536,9 @@ static inline void nf_reset(struct sk_buff *skb)
+       nf_bridge_put(skb->nf_bridge);
+       skb->nf_bridge = NULL;
+ #endif
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
++      skb->nf_trace = 0;
++#endif
+ }
+ static inline void nf_reset_trace(struct sk_buff *skb)
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 4293808..9bdcc4e 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -15,14 +15,29 @@
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
++#include <linux/err.h>
+ /*
+  * Flags to pass to kmem_cache_create().
+  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+  */
+ #define SLAB_CONSISTENCY_CHECKS       0x00000100UL    /* DEBUG: Perform (expensive) checks on alloc/free */
++
++#ifdef CONFIG_PAX_USERCOPY
++#define SLAB_USERCOPY         0x00000200UL    /* PaX: Allow copying objs to/from userland */
++#else
++#define SLAB_USERCOPY         0x00000000UL
++#endif
++
+ #define SLAB_RED_ZONE         0x00000400UL    /* DEBUG: Red zone objs in a cache */
+ #define SLAB_POISON           0x00000800UL    /* DEBUG: Poison objects */
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++#define SLAB_NO_SANITIZE      0x00001000UL    /* PaX: Do not sanitize objs on free */
++#else
++#define SLAB_NO_SANITIZE      0x00000000UL
++#endif
++
+ #define SLAB_HWCACHE_ALIGN    0x00002000UL    /* Align objs on cache lines */
+ #define SLAB_CACHE_DMA                0x00004000UL    /* Use GFP_DMA memory */
+ #define SLAB_STORE_USER               0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
+@@ -109,10 +124,13 @@
+  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+  * Both make kfree a no-op.
+  */
+-#define ZERO_SIZE_PTR ((void *)16)
++#define ZERO_SIZE_PTR                         \
++({                                            \
++      BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
++      (void *)(-MAX_ERRNO-1L);                \
++})
+-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+-                              (unsigned long)ZERO_SIZE_PTR)
++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
+ #include <linux/kmemleak.h>
+ #include <linux/kasan.h>
+@@ -127,6 +145,9 @@ bool slab_is_available(void);
+ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+                       unsigned long,
+                       void (*)(void *));
++struct kmem_cache *kmem_cache_create_usercopy(const char *, size_t, size_t,
++                      unsigned long, size_t, size_t,
++                      void (*)(void *));
+ void kmem_cache_destroy(struct kmem_cache *);
+ int kmem_cache_shrink(struct kmem_cache *);
+@@ -146,6 +167,11 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
+               sizeof(struct __struct), __alignof__(struct __struct),\
+               (__flags), NULL)
++#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) kmem_cache_create_usercopy(#__struct,\
++              sizeof(struct __struct), __alignof__(struct __struct),\
++              (__flags), offsetof(struct __struct, __field),\
++              sizeof(((struct __struct *)0)->__field), NULL)
++
+ /*
+  * Common kmalloc functions provided by all allocators
+  */
+@@ -154,18 +180,10 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
++bool is_usercopy_object(const void *ptr);
+-#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+ const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page);
+-#else
+-static inline const char *__check_heap_object(const void *ptr,
+-                                            unsigned long n,
+-                                            struct page *page)
+-{
+-      return NULL;
+-}
+-#endif
+ /*
+  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+@@ -276,6 +294,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+ #endif
++#ifdef CONFIG_PAX_USERCOPY
++extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
++#endif
++
+ /*
+  * Figure out which kmalloc slab an allocation of a certain size
+  * belongs to.
+@@ -284,7 +306,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+  * 2 = 129 .. 192 bytes
+  * n = 2^(n-1)+1 .. 2^n
+  */
+-static __always_inline int kmalloc_index(size_t size)
++static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
+ {
+       if (!size)
+               return 0;
+@@ -327,7 +349,7 @@ static __always_inline int kmalloc_index(size_t size)
+ }
+ #endif /* !CONFIG_SLOB */
+-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
++void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __alloc_size(1) __size_overflow(1);
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
+ void kmem_cache_free(struct kmem_cache *, void *);
+@@ -351,10 +373,10 @@ static __always_inline void kfree_bulk(size_t size, void **p)
+ }
+ #ifdef CONFIG_NUMA
+-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __alloc_size(1) __size_overflow(1);
+ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+ #else
+-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
++static __always_inline void * __alloc_size(1) __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+       return __kmalloc(size, flags);
+ }
+diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
+index 4ad2c5a..ebff702 100644
+--- a/include/linux/slab_def.h
++++ b/include/linux/slab_def.h
+@@ -40,7 +40,7 @@ struct kmem_cache {
+ /* 4) cache creation/removal */
+       const char *name;
+       struct list_head list;
+-      int refcount;
++      atomic_t refcount;
+       int object_size;
+       int align;
+@@ -56,10 +56,14 @@ struct kmem_cache {
+       unsigned long node_allocs;
+       unsigned long node_frees;
+       unsigned long node_overflow;
+-      atomic_t allochit;
+-      atomic_t allocmiss;
+-      atomic_t freehit;
+-      atomic_t freemiss;
++      atomic_unchecked_t allochit;
++      atomic_unchecked_t allocmiss;
++      atomic_unchecked_t freehit;
++      atomic_unchecked_t freemiss;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      atomic_unchecked_t sanitized;
++      atomic_unchecked_t not_sanitized;
++#endif
+ #ifdef CONFIG_DEBUG_SLAB_LEAK
+       atomic_t store_user_clean;
+ #endif
+@@ -84,6 +88,9 @@ struct kmem_cache {
+       unsigned int *random_seq;
+ #endif
++      size_t useroffset;      /* USERCOPY region offset */
++      size_t usersize;        /* USERCOPY region size */
++
+       struct kmem_cache_node *node[MAX_NUMNODES];
+ };
+diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
+index 75f56c2..97880d2 100644
+--- a/include/linux/slub_def.h
++++ b/include/linux/slub_def.h
+@@ -74,7 +74,7 @@ struct kmem_cache {
+       struct kmem_cache_order_objects max;
+       struct kmem_cache_order_objects min;
+       gfp_t allocflags;       /* gfp flags to use on each alloc */
+-      int refcount;           /* Refcount for slab cache destroy */
++      atomic_t refcount;      /* Refcount for slab cache destroy */
+       void (*ctor)(void *);
+       int inuse;              /* Offset to metadata */
+       int align;              /* Alignment */
+@@ -108,6 +108,9 @@ struct kmem_cache {
+       struct kasan_cache kasan_info;
+ #endif
++      size_t useroffset;      /* USERCOPY region offset */
++      size_t usersize;        /* USERCOPY region size */
++
+       struct kmem_cache_node *node[MAX_NUMNODES];
+ };
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index eccae469..58e69b8 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
+ #endif
+ #define get_cpu()             ({ preempt_disable(); smp_processor_id(); })
++#define raw_get_cpu()         ({ raw_preempt_disable(); raw_smp_processor_id(); })
+ #define put_cpu()             preempt_enable()
++#define raw_put_cpu_no_resched()      raw_preempt_enable_no_resched()
+ /*
+  * Callback to arch code if there's nosmp or maxcpus=0 on the
+diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
+index a0596ca0..6c9245f 100644
+--- a/include/linux/sock_diag.h
++++ b/include/linux/sock_diag.h
+@@ -16,7 +16,7 @@ struct sock_diag_handler {
+       int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+       int (*get_info)(struct sk_buff *skb, struct sock *sk);
+       int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
+-};
++} __do_const;
+ int sock_diag_register(const struct sock_diag_handler *h);
+ void sock_diag_unregister(const struct sock_diag_handler *h);
+diff --git a/include/linux/sonet.h b/include/linux/sonet.h
+index 680f9a3..f13aeb0 100644
+--- a/include/linux/sonet.h
++++ b/include/linux/sonet.h
+@@ -7,7 +7,7 @@
+ #include <uapi/linux/sonet.h>
+ struct k_sonet_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+       __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 47dd0ce..3275f16 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -142,14 +142,17 @@ do {                                                             \
+  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+ #else
+-static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
++static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
++static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+ {
+       __acquire(lock);
+       arch_spin_lock(&lock->raw_lock);
+ }
+ static inline void
+-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
++do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock);
++static inline void
++do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+ {
+       __acquire(lock);
+       arch_spin_lock_flags(&lock->raw_lock, *flags);
+@@ -160,7 +163,8 @@ static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+       return arch_spin_trylock(&(lock)->raw_lock);
+ }
+-static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
++static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
++static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+ {
+       arch_spin_unlock(&lock->raw_lock);
+       __release(lock);
+@@ -297,11 +301,13 @@ do {                                                     \
+       raw_spin_lock_init(&(_lock)->rlock);            \
+ } while (0)
++static __always_inline void spin_lock(spinlock_t *lock) __acquires(lock);
+ static __always_inline void spin_lock(spinlock_t *lock)
+ {
+       raw_spin_lock(&lock->rlock);
+ }
++static __always_inline void spin_lock_bh(spinlock_t *lock) __acquires(lock);
+ static __always_inline void spin_lock_bh(spinlock_t *lock)
+ {
+       raw_spin_lock_bh(&lock->rlock);
+@@ -327,6 +333,7 @@ do {                                                                       \
+       raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
+ } while (0)
++static __always_inline void spin_lock_irq(spinlock_t *lock) __acquires(lock);
+ static __always_inline void spin_lock_irq(spinlock_t *lock)
+ {
+       raw_spin_lock_irq(&lock->rlock);
+@@ -342,21 +349,25 @@ do {                                                                     \
+       raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+ } while (0)
++static __always_inline void spin_unlock(spinlock_t *lock) __releases(lock);
+ static __always_inline void spin_unlock(spinlock_t *lock)
+ {
+       raw_spin_unlock(&lock->rlock);
+ }
++static __always_inline void spin_unlock_bh(spinlock_t *lock) __releases(lock);
+ static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ {
+       raw_spin_unlock_bh(&lock->rlock);
+ }
++static __always_inline void spin_unlock_irq(spinlock_t *lock) __releases(lock);
+ static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ {
+       raw_spin_unlock_irq(&lock->rlock);
+ }
++static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(lock);
+ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ {
+       raw_spin_unlock_irqrestore(&lock->rlock, flags);
+diff --git a/include/linux/srcu.h b/include/linux/srcu.h
+index dc8eb63..b4b9482 100644
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -228,7 +228,8 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
+  * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
+  * was invoked in process context.
+  */
+-static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
++static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
++static inline int srcu_read_lock(struct srcu_struct *sp)
+ {
+       int retval;
+@@ -246,8 +247,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+  *
+  * Exit an SRCU read-side critical section.
+  */
++static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
+ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
+-      __releases(sp)
+ {
+       rcu_lock_release(&(sp)->dep_map);
+       __srcu_read_unlock(sp, idx);
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 26b6f6a..434ee17 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -18,51 +18,51 @@ extern void *memdup_user_nul(const void __user *, size_t);
+ #include <asm/string.h>
+ #ifndef __HAVE_ARCH_STRCPY
+-extern char * strcpy(char *,const char *);
++extern char * strcpy(char *,const char *) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRNCPY
+-extern char * strncpy(char *,const char *, __kernel_size_t);
++extern char * strncpy(char *,const char *, __kernel_size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRLCPY
+-size_t strlcpy(char *, const char *, size_t);
++size_t strlcpy(char *, const char *, size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRSCPY
+-ssize_t __must_check strscpy(char *, const char *, size_t);
++ssize_t __must_check strscpy(char *, const char *, size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRCAT
+-extern char * strcat(char *, const char *);
++extern char * strcat(char *, const char *) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRNCAT
+-extern char * strncat(char *, const char *, __kernel_size_t);
++extern char * strncat(char *, const char *, __kernel_size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRLCAT
+-extern size_t strlcat(char *, const char *, __kernel_size_t);
++extern size_t strlcat(char *, const char *, __kernel_size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRCMP
+-extern int strcmp(const char *,const char *);
++extern int strcmp(const char *,const char *) __nocapture();
+ #endif
+ #ifndef __HAVE_ARCH_STRNCMP
+-extern int strncmp(const char *,const char *,__kernel_size_t);
++extern int strncmp(const char *,const char *,__kernel_size_t) __nocapture(1, 2);
+ #endif
+ #ifndef __HAVE_ARCH_STRCASECMP
+-extern int strcasecmp(const char *s1, const char *s2);
++extern int strcasecmp(const char *s1, const char *s2) __nocapture();
+ #endif
+ #ifndef __HAVE_ARCH_STRNCASECMP
+-extern int strncasecmp(const char *s1, const char *s2, size_t n);
++extern int strncasecmp(const char *s1, const char *s2, size_t n) __nocapture(1, 2);
+ #endif
+ #ifndef __HAVE_ARCH_STRCHR
+-extern char * strchr(const char *,int);
++extern char * strchr(const char *,int) __nocapture(-1);
+ #endif
+ #ifndef __HAVE_ARCH_STRCHRNUL
+-extern char * strchrnul(const char *,int);
++extern char * strchrnul(const char *,int) __nocapture(-1);
+ #endif
+ #ifndef __HAVE_ARCH_STRNCHR
+-extern char * strnchr(const char *, size_t, int);
++extern char * strnchr(const char *, size_t, int) __nocapture(-1);
+ #endif
+ #ifndef __HAVE_ARCH_STRRCHR
+-extern char * strrchr(const char *,int);
++extern char * strrchr(const char *,int) __nocapture(-1);
+ #endif
+-extern char * __must_check skip_spaces(const char *);
++extern char * __must_check skip_spaces(const char *) __nocapture(-1);
+ extern char *strim(char *);
+@@ -72,63 +72,63 @@ static inline __must_check char *strstrip(char *str)
+ }
+ #ifndef __HAVE_ARCH_STRSTR
+-extern char * strstr(const char *, const char *);
++extern char * strstr(const char *, const char *) __nocapture(-1, 2);
+ #endif
+ #ifndef __HAVE_ARCH_STRNSTR
+ extern char * strnstr(const char *, const char *, size_t);
+ #endif
+ #ifndef __HAVE_ARCH_STRLEN
+-extern __kernel_size_t strlen(const char *);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
+ #endif
+ #ifndef __HAVE_ARCH_STRNLEN
+-extern __kernel_size_t strnlen(const char *,__kernel_size_t);
++extern __kernel_size_t strnlen(const char *,__kernel_size_t) __nocapture(1);
+ #endif
+ #ifndef __HAVE_ARCH_STRPBRK
+-extern char * strpbrk(const char *,const char *);
++extern char * strpbrk(const char *,const char *) __nocapture(-1, 2);
+ #endif
+ #ifndef __HAVE_ARCH_STRSEP
+-extern char * strsep(char **,const char *);
++extern char * strsep(char **,const char *) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_STRSPN
+-extern __kernel_size_t strspn(const char *,const char *);
++extern __kernel_size_t strspn(const char *,const char *) __nocapture();
+ #endif
+ #ifndef __HAVE_ARCH_STRCSPN
+-extern __kernel_size_t strcspn(const char *,const char *);
++extern __kernel_size_t strcspn(const char *,const char *) __nocapture();
+ #endif
+ #ifndef __HAVE_ARCH_MEMSET
+ extern void * memset(void *,int,__kernel_size_t);
+ #endif
+ #ifndef __HAVE_ARCH_MEMCPY
+-extern void * memcpy(void *,const void *,__kernel_size_t);
++extern void * memcpy(void *,const void *,__kernel_size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_MEMMOVE
+-extern void * memmove(void *,const void *,__kernel_size_t);
++extern void * memmove(void *,const void *,__kernel_size_t) __nocapture(2);
+ #endif
+ #ifndef __HAVE_ARCH_MEMSCAN
+ extern void * memscan(void *,int,__kernel_size_t);
+ #endif
+ #ifndef __HAVE_ARCH_MEMCMP
+-extern int memcmp(const void *,const void *,__kernel_size_t);
++extern int memcmp(const void *,const void *,__kernel_size_t) __nocapture(1, 2);
+ #endif
+ #ifndef __HAVE_ARCH_MEMCHR
+-extern void * memchr(const void *,int,__kernel_size_t);
++extern void * memchr(const void *,int,__kernel_size_t) __nocapture(-1);
+ #endif
+-void *memchr_inv(const void *s, int c, size_t n);
++void *memchr_inv(const void *s, int c, size_t n) __nocapture(-1);
+ char *strreplace(char *s, char old, char new);
+ extern void kfree_const(const void *x);
+-extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
+-extern const char *kstrdup_const(const char *s, gfp_t gfp);
+-extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
+-extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
++extern char *kstrdup(const char *s, gfp_t gfp) __malloc __nocapture(1);
++extern const char *kstrdup_const(const char *s, gfp_t gfp) __nocapture(1);
++extern char *kstrndup(const char *s, size_t len, gfp_t gfp) __nocapture(1);
++extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __nocapture(1);
+ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+ extern void argv_free(char **argv);
+-extern bool sysfs_streq(const char *s1, const char *s2);
+-extern int kstrtobool(const char *s, bool *res);
++extern bool sysfs_streq(const char *s1, const char *s2) __nocapture();
++extern int kstrtobool(const char *s, bool *res) __nocapture(1);
+ static inline int strtobool(const char *s, bool *res)
+ {
+       return kstrtobool(s, res);
+@@ -137,8 +137,8 @@ static inline int strtobool(const char *s, bool *res)
+ int match_string(const char * const *array, size_t n, const char *string);
+ #ifdef CONFIG_BINARY_PRINTF
+-int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+-int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
++int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) __nocapture(3);
++int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) __nocapture(3);
+ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
+index 5c9c6cd..f16c5c9 100644
+--- a/include/linux/sunrpc/addr.h
++++ b/include/linux/sunrpc/addr.h
+@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+ {
+       switch (sap->sa_family) {
+       case AF_INET:
+-              return ntohs(((struct sockaddr_in *)sap)->sin_port);
++              return ntohs(((const struct sockaddr_in *)sap)->sin_port);
+       case AF_INET6:
+-              return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
++              return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
+       }
+       return 0;
+ }
+@@ -58,7 +58,7 @@ static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
+ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+                                   const struct sockaddr *src)
+ {
+-      const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
++      const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
+       struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+       dsin->sin_family = ssin->sin_family;
+@@ -177,7 +177,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+       if (sa->sa_family != AF_INET6)
+               return 0;
+-      return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
++      return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
+ }
+ #endif /* _LINUX_SUNRPC_ADDR_H */
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 5c02b06..93e07c5 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -103,7 +103,7 @@ struct rpc_procinfo {
+       unsigned int            p_timer;        /* Which RTT timer to use */
+       u32                     p_statidx;      /* Which procedure to account */
+       const char *            p_name;         /* name of procedure */
+-};
++} __do_const;
+ #ifdef __KERNEL__
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 7321ae9..f37a11e 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -426,7 +426,7 @@ struct svc_procedure {
+       unsigned int            pc_count;       /* call count */
+       unsigned int            pc_cachetype;   /* cache info (NFS) */
+       unsigned int            pc_xdrressize;  /* maximum size of XDR reply */
+-};
++} __do_const;
+ /*
+  * Mode for mapping cpus to pools.
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index d6917b8..e05ca83 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -54,15 +54,15 @@ extern unsigned int svcrdma_max_requests;
+ extern unsigned int svcrdma_max_bc_requests;
+ extern unsigned int svcrdma_max_req_size;
+-extern atomic_t rdma_stat_recv;
+-extern atomic_t rdma_stat_read;
+-extern atomic_t rdma_stat_write;
+-extern atomic_t rdma_stat_sq_starve;
+-extern atomic_t rdma_stat_rq_starve;
+-extern atomic_t rdma_stat_rq_poll;
+-extern atomic_t rdma_stat_rq_prod;
+-extern atomic_t rdma_stat_sq_poll;
+-extern atomic_t rdma_stat_sq_prod;
++extern atomic_unchecked_t rdma_stat_recv;
++extern atomic_unchecked_t rdma_stat_read;
++extern atomic_unchecked_t rdma_stat_write;
++extern atomic_unchecked_t rdma_stat_sq_starve;
++extern atomic_unchecked_t rdma_stat_rq_starve;
++extern atomic_unchecked_t rdma_stat_rq_poll;
++extern atomic_unchecked_t rdma_stat_rq_prod;
++extern atomic_unchecked_t rdma_stat_sq_poll;
++extern atomic_unchecked_t rdma_stat_sq_prod;
+ /*
+  * Contexts are built when an RDMA request is created and are a
+diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
+index d039320..035edad 100644
+--- a/include/linux/sunrpc/svcauth.h
++++ b/include/linux/sunrpc/svcauth.h
+@@ -128,7 +128,7 @@ struct auth_ops {
+       int     (*release)(struct svc_rqst *rq);
+       void    (*domain_release)(struct auth_domain *);
+       int     (*set_client)(struct svc_rqst *rq);
+-};
++} __do_const;
+ #define       SVC_GARBAGE     1
+ #define       SVC_SYSERR      2
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index 5c3a5f3..84a8bef 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -165,7 +165,7 @@ static inline int is_write_migration_entry(swp_entry_t entry)
+ #ifdef CONFIG_MEMORY_FAILURE
+-extern atomic_long_t num_poisoned_pages __read_mostly;
++extern atomic_long_unchecked_t num_poisoned_pages __read_mostly;
+ /*
+  * Support for hardware poisoned pages
+@@ -188,22 +188,22 @@ static inline bool test_set_page_hwpoison(struct page *page)
+ static inline void num_poisoned_pages_inc(void)
+ {
+-      atomic_long_inc(&num_poisoned_pages);
++      atomic_long_inc_unchecked(&num_poisoned_pages);
+ }
+ static inline void num_poisoned_pages_dec(void)
+ {
+-      atomic_long_dec(&num_poisoned_pages);
++      atomic_long_dec_unchecked(&num_poisoned_pages);
+ }
+ static inline void num_poisoned_pages_add(long num)
+ {
+-      atomic_long_add(num, &num_poisoned_pages);
++      atomic_long_add_unchecked(num, &num_poisoned_pages);
+ }
+ static inline void num_poisoned_pages_sub(long num)
+ {
+-      atomic_long_sub(num, &num_poisoned_pages);
++      atomic_long_sub_unchecked(num, &num_poisoned_pages);
+ }
+ #else
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index d022390..80f9811 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -102,7 +102,14 @@ union bpf_attr;
+ #define __TYPE_IS_L(t)        (__same_type((t)0, 0L))
+ #define __TYPE_IS_UL(t)       (__same_type((t)0, 0UL))
+ #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
+-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
++#define __SC_TYPE(t) __typeof__(                              \
++      __builtin_choose_expr(                                  \
++              sizeof(t) > sizeof(int),                        \
++              (t) 0,                                          \
++              __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L)   \
++      ))
++#define __SC_LONG(t, a)       __SC_TYPE(t) a
++#define __SC_WRAP(t, a)       (__SC_TYPE(t)) a
+ #define __SC_CAST(t, a)       (t) a
+ #define __SC_ARGS(t, a)       a
+ #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
+@@ -192,17 +199,18 @@ extern struct trace_event_functions exit_syscall_print_funcs;
+ #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
+ #define __SYSCALL_DEFINEx(x, name, ...)                                       \
+-      asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))       \
+-              __attribute__((alias(__stringify(SyS##name))));         \
+       static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));  \
+-      asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));      \
+-      asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))       \
++      static inline asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+       {                                                               \
+               long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__));  \
+               __MAP(x,__SC_TEST,__VA_ARGS__);                         \
+               __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));       \
+               return ret;                                             \
+       }                                                               \
++      asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))       \
++      {                                                               \
++              return SyS##name(__MAP(x,__SC_WRAP,__VA_ARGS__));       \
++      }                                                               \
+       static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
+@@ -384,11 +392,11 @@ asmlinkage long sys_sync(void);
+ asmlinkage long sys_fsync(unsigned int fd);
+ asmlinkage long sys_fdatasync(unsigned int fd);
+ asmlinkage long sys_bdflush(int func, long data);
+-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
+-                              char __user *type, unsigned long flags,
++asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
++                              const char __user *type, unsigned long flags,
+                               void __user *data);
+-asmlinkage long sys_umount(char __user *name, int flags);
+-asmlinkage long sys_oldumount(char __user *name);
++asmlinkage long sys_umount(const char __user *name, int flags);
++asmlinkage long sys_oldumount(const char __user *name);
+ asmlinkage long sys_truncate(const char __user *path, long length);
+ asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+ asmlinkage long sys_stat(const char __user *filename,
+@@ -457,7 +465,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
+                       unsigned long prot, unsigned long pgoff,
+                       unsigned long flags);
+ asmlinkage long sys_msync(unsigned long start, size_t len, int flags);
+-asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
++asmlinkage long sys_fadvise64(int fd, loff_t offset, loff_t len, int advice);
+ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
+ asmlinkage long sys_munmap(unsigned long addr, size_t len);
+ asmlinkage long sys_mlock(unsigned long start, size_t len);
+@@ -610,7 +618,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
+ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
+ asmlinkage long sys_send(int, void __user *, size_t, unsigned);
+ asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
+-                              struct sockaddr __user *, int);
++                              struct sockaddr __user *, int) __intentional_overflow(0);
+ asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+ asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
+                            unsigned int vlen, unsigned flags);
+@@ -669,10 +677,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+ asmlinkage long sys_semget(key_t key, int nsems, int semflg);
+ asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
+-                              unsigned nsops);
++                              long nsops);
+ asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
+ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
+-                              unsigned nsops,
++                              long nsops,
+                               const struct timespec __user *timeout);
+ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+ asmlinkage long sys_shmget(key_t key, size_t size, int flag);
+@@ -706,7 +714,7 @@ asmlinkage long sys_sysfs(int option,
+                               unsigned long arg1, unsigned long arg2);
+ asmlinkage long sys_syslog(int type, char __user *buf, int len);
+ asmlinkage long sys_uselib(const char __user *library);
+-asmlinkage long sys_ni_syscall(void);
++asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
+                          unsigned long data);
+@@ -885,7 +893,7 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
+                           const char __user *uargs);
+ asmlinkage long sys_getrandom(char __user *buf, size_t count,
+                             unsigned int flags);
+-asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
++asmlinkage long sys_bpf(int cmd, union bpf_attr __user *attr, unsigned int size);
+ asmlinkage long sys_execveat(int dfd, const char __user *filename,
+                       const char __user *const __user *argv,
+diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
+index 27b3b0b..e093dd9 100644
+--- a/include/linux/syscore_ops.h
++++ b/include/linux/syscore_ops.h
+@@ -16,7 +16,7 @@ struct syscore_ops {
+       int (*suspend)(void);
+       void (*resume)(void);
+       void (*shutdown)(void);
+-};
++} __do_const;
+ extern void register_syscore_ops(struct syscore_ops *ops);
+ extern void unregister_syscore_ops(struct syscore_ops *ops);
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index a4f7203..dcad65f 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -40,12 +40,18 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
+ extern int proc_dostring(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
++extern int proc_dostring_modpriv(struct ctl_table *, int,
++                       void __user *, size_t *, loff_t *);
+ extern int proc_dointvec(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
++extern int proc_dointvec_secure(struct ctl_table *, int,
++                              void __user *, size_t *, loff_t *);
+ extern int proc_douintvec(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_minmax(struct ctl_table *, int,
+                               void __user *, size_t *, loff_t *);
++extern int proc_dointvec_minmax_secure(struct ctl_table *, int,
++                                     void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_jiffies(struct ctl_table *, int,
+                                void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
+@@ -116,7 +122,8 @@ struct ctl_table
+       struct ctl_table_poll *poll;
+       void *extra1;
+       void *extra2;
+-};
++} __do_const __randomize_layout;
++typedef struct ctl_table __no_const ctl_table_no_const;
+ struct ctl_node {
+       struct rb_node node;
+diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
+index c6f0f0d..e663567 100644
+--- a/include/linux/sysfs.h
++++ b/include/linux/sysfs.h
+@@ -34,7 +34,8 @@ struct attribute {
+       struct lock_class_key   *key;
+       struct lock_class_key   skey;
+ #endif
+-};
++} __do_const;
++typedef struct attribute __no_const attribute_no_const;
+ /**
+  *    sysfs_attr_init - initialize a dynamically allocated sysfs attribute
+@@ -88,7 +89,8 @@ struct attribute_group {
+                                                 struct bin_attribute *, int);
+       struct attribute        **attrs;
+       struct bin_attribute    **bin_attrs;
+-};
++} __do_const;
++typedef struct attribute_group __no_const attribute_group_no_const;
+ /**
+  * Use these macros to make defining attributes easier. See include/linux/device.h
+@@ -162,7 +164,8 @@ struct bin_attribute {
+                        char *, loff_t, size_t);
+       int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
+                   struct vm_area_struct *vma);
+-};
++} __do_const;
++typedef struct bin_attribute __no_const bin_attribute_no_const;
+ /**
+  *    sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
+@@ -512,7 +515,7 @@ static inline void sysfs_notify_dirent(struct kernfs_node *kn)
+ }
+ static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
+-                                                 const unsigned char *name)
++                                                 const char *name)
+ {
+       return kernfs_find_and_get(parent, name);
+ }
+diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
+index 387fa7d..3fcde6b 100644
+--- a/include/linux/sysrq.h
++++ b/include/linux/sysrq.h
+@@ -16,6 +16,7 @@
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/compiler.h>
+ /* Possible values of bitmask for enabling sysrq functions */
+ /* 0x0001 is reserved for enable everything */
+@@ -33,7 +34,7 @@ struct sysrq_key_op {
+       char *help_msg;
+       char *action_msg;
+       int enable_mask;
+-};
++} __do_const;
+ #ifdef CONFIG_MAGIC_SYSRQ
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 7be9b12..66bf0a8 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -68,13 +68,13 @@ struct tcp_fastopen_cookie {
+ /* This defines a selective acknowledgement block. */
+ struct tcp_sack_block_wire {
+-      __be32  start_seq;
+-      __be32  end_seq;
++      __be32  start_seq __intentional_overflow(-1);
++      __be32  end_seq __intentional_overflow(-1);
+ };
+ struct tcp_sack_block {
+-      u32     start_seq;
+-      u32     end_seq;
++      u32     start_seq __intentional_overflow(-1);
++      u32     end_seq __intentional_overflow(-1);
+ };
+ /*These are used to set the sack_ok field in struct tcp_options_received */
+@@ -162,7 +162,7 @@ struct tcp_sock {
+                                * total number of data segments in.
+                                */
+       u32     rcv_nxt;        /* What we want to receive next         */
+-      u32     copied_seq;     /* Head of yet unread data              */
++      u32     copied_seq __intentional_overflow(-1);  /* Head of yet unread data              */
+       u32     rcv_wup;        /* rcv_nxt on last window update sent   */
+       u32     snd_nxt;        /* Next sequence we send                */
+       u32     segs_out;       /* RFC4898 tcpEStatsPerfSegsOut
+@@ -270,7 +270,7 @@ struct tcp_sock {
+       u32     delivered;      /* Total data packets delivered incl. rexmits */
+       u32     rcv_wnd;        /* Current receiver window              */
+-      u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */
++      u32     write_seq __intentional_overflow(-1);   /* Tail(+1) of data held in tcp send buffer */
+       u32     notsent_lowat;  /* TCP_NOTSENT_LOWAT */
+       u32     pushed_seq;     /* Last pushed seq, required to talk to windows */
+       u32     lost_out;       /* Lost packets                 */
+@@ -311,7 +311,7 @@ struct tcp_sock {
+       int     undo_retrans;   /* number of undoable retransmissions. */
+       u32     total_retrans;  /* Total retransmits for entire connection */
+-      u32     urg_seq;        /* Seq of received urgent pointer */
++      u32     urg_seq __intentional_overflow(-1);     /* Seq of received urgent pointer */
+       unsigned int            keepalive_time;   /* time before keep alive takes place */
+       unsigned int            keepalive_intvl;  /* time interval between keep alive probes */
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index 2b5b10e..37b4c2c 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -50,6 +50,13 @@ struct restart_block {
+ extern long do_no_restart_syscall(struct restart_block *parm);
++enum {
++      BAD_STACK = -1,
++      NOT_STACK = 0,
++      GOOD_STACK,
++      GOOD_FRAME,
++};
++
+ #include <linux/bitops.h>
+ #include <asm/thread_info.h>
+@@ -106,11 +113,11 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+ #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+-static inline int arch_within_stack_frames(const void * const stack,
+-                                         const void * const stackend,
+-                                         const void *obj, unsigned long len)
++static inline int arch_within_stack_frames(unsigned long stack,
++                                         unsigned long stackend,
++                                         unsigned long obj, unsigned long len)
+ {
+-      return 0;
++      return GOOD_STACK;
+ }
+ #endif
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 40144f3..610732a 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -225,7 +225,7 @@ struct tty_port {
+       const struct tty_port_operations *ops;  /* Port operations */
+       spinlock_t              lock;           /* Lock protecting tty field */
+       int                     blocked_open;   /* Waiting to open */
+-      int                     count;          /* Usage count */
++      atomic_t                count;          /* Usage count */
+       wait_queue_head_t       open_wait;      /* Open waiters */
+       wait_queue_head_t       delta_msr_wait; /* Modem status change */
+       unsigned long           flags;          /* User TTY flags ASYNC_ */
+@@ -326,7 +326,7 @@ struct tty_struct {
+       /* If the tty has a pending do_SAK, queue it here - akpm */
+       struct work_struct SAK_work;
+       struct tty_port *port;
+-};
++} __randomize_layout;
+ /* Each of a tty's open files has private_data pointing to tty_file_private */
+ struct tty_file_private {
+@@ -646,7 +646,7 @@ extern int tty_port_open(struct tty_port *port,
+                               struct tty_struct *tty, struct file *filp);
+ static inline int tty_port_users(struct tty_port *port)
+ {
+-      return port->count + port->blocked_open;
++      return atomic_read(&port->count) + port->blocked_open;
+ }
+ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index b742b5e..76dc1fa 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -291,7 +291,7 @@ struct tty_operations {
+       void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+ #endif
+       const struct file_operations *proc_fops;
+-};
++} __do_const __randomize_layout;
+ struct tty_driver {
+       int     magic;          /* magic number for this structure */
+@@ -325,7 +325,7 @@ struct tty_driver {
+       const struct tty_operations *ops;
+       struct list_head tty_drivers;
+-};
++} __randomize_layout;
+ extern struct list_head tty_drivers;
+diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
+index 3971cf0..7704c48 100644
+--- a/include/linux/tty_ldisc.h
++++ b/include/linux/tty_ldisc.h
+@@ -202,7 +202,7 @@ struct tty_ldisc_ops {
+       struct  module *owner;
+-      int refcount;
++      atomic_t refcount;
+ };
+ struct tty_ldisc {
+diff --git a/include/linux/types.h b/include/linux/types.h
+index baf7183..161f20f 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -159,8 +159,10 @@ typedef unsigned __bitwise__ fmode_t;
+ #ifdef CONFIG_PHYS_ADDR_T_64BIT
+ typedef u64 phys_addr_t;
++#define RESOURCE_SIZE_MAX ULLONG_MAX
+ #else
+ typedef u32 phys_addr_t;
++#define RESOURCE_SIZE_MAX ULONG_MAX
+ #endif
+ typedef phys_addr_t resource_size_t;
+@@ -175,10 +177,26 @@ typedef struct {
+       int counter;
+ } atomic_t;
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      int counter;
++} atomic_unchecked_t;
++#else
++typedef atomic_t atomic_unchecked_t;
++#endif
++
+ #ifdef CONFIG_64BIT
+ typedef struct {
+       long counter;
+ } atomic64_t;
++
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++      long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
+ #endif
+ struct list_head {
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index f30c187..d2b4ce2 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -109,7 +109,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+  * Returns 0 on success, or -EFAULT.
+  */
+ #define probe_kernel_address(addr, retval)            \
+-      probe_kernel_read(&retval, addr, sizeof(retval))
++      probe_kernel_read(&(retval), addr, sizeof(retval))
+ #ifndef user_access_begin
+ #define user_access_begin() do { } while (0)
+diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
+index 25e9d92..1b34fff 100644
+--- a/include/linux/uidgid.h
++++ b/include/linux/uidgid.h
+@@ -187,4 +187,10 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+ #endif /* CONFIG_USER_NS */
++#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
++#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
++#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
++#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
++#define gr_is_global_nonroot_gid(x) (!gid_eq((x), GLOBAL_ROOT_GID))
++
+ #endif /* _LINUX_UIDGID_H */
+diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
+index 32c0e83..671eb35 100644
+--- a/include/linux/uio_driver.h
++++ b/include/linux/uio_driver.h
+@@ -67,7 +67,7 @@ struct uio_device {
+         struct module           *owner;
+         struct device           *dev;
+         int                     minor;
+-        atomic_t                event;
++        atomic_unchecked_t      event;
+         struct fasync_struct    *async_queue;
+         wait_queue_head_t       wait;
+         struct uio_info         *info;
+diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
+index 33383ca..44211d6 100644
+--- a/include/linux/unaligned/access_ok.h
++++ b/include/linux/unaligned/access_ok.h
+@@ -4,34 +4,34 @@
+ #include <linux/kernel.h>
+ #include <asm/byteorder.h>
+-static __always_inline u16 get_unaligned_le16(const void *p)
++static __always_inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
+ {
+-      return le16_to_cpup((__le16 *)p);
++      return le16_to_cpup((const __le16 *)p);
+ }
+-static __always_inline u32 get_unaligned_le32(const void *p)
++static __always_inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
+ {
+-      return le32_to_cpup((__le32 *)p);
++      return le32_to_cpup((const __le32 *)p);
+ }
+-static __always_inline u64 get_unaligned_le64(const void *p)
++static __always_inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
+ {
+-      return le64_to_cpup((__le64 *)p);
++      return le64_to_cpup((const __le64 *)p);
+ }
+-static __always_inline u16 get_unaligned_be16(const void *p)
++static __always_inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
+ {
+-      return be16_to_cpup((__be16 *)p);
++      return be16_to_cpup((const __be16 *)p);
+ }
+-static __always_inline u32 get_unaligned_be32(const void *p)
++static __always_inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
+ {
+-      return be32_to_cpup((__be32 *)p);
++      return be32_to_cpup((const __be32 *)p);
+ }
+-static __always_inline u64 get_unaligned_be64(const void *p)
++static __always_inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
+ {
+-      return be64_to_cpup((__be64 *)p);
++      return be64_to_cpup((const __be64 *)p);
+ }
+ static __always_inline void put_unaligned_le16(u16 val, void *p)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index eba1f10..94c966f 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -370,7 +370,7 @@ struct usb_bus {
+                                        * with the URB_SHORT_NOT_OK flag set.
+                                        */
+       unsigned no_sg_constraint:1;    /* no sg constraint */
+-      unsigned sg_tablesize;          /* 0 or largest number of sg list entries */
++      unsigned short sg_tablesize;    /* 0 or largest number of sg list entries */
+       int devnum_next;                /* Next open device number in
+                                        * round-robin allocation */
+@@ -599,7 +599,7 @@ struct usb_device {
+       int maxchild;
+       u32 quirks;
+-      atomic_t urbnum;
++      atomic_unchecked_t urbnum;
+       unsigned long active_duration;
+@@ -1793,10 +1793,10 @@ void usb_sg_wait(struct usb_sg_request *io);
+ /* NOTE:  these are not the standard USB_ENDPOINT_XFER_* values!! */
+ /* (yet ... they're the values used by usbfs) */
+-#define PIPE_ISOCHRONOUS              0
+-#define PIPE_INTERRUPT                        1
+-#define PIPE_CONTROL                  2
+-#define PIPE_BULK                     3
++#define PIPE_ISOCHRONOUS              0U
++#define PIPE_INTERRUPT                        1U
++#define PIPE_CONTROL                  2U
++#define PIPE_BULK                     3U
+ #define usb_pipein(pipe)      ((pipe) & USB_DIR_IN)
+ #define usb_pipeout(pipe)     (!usb_pipein(pipe))
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 66fc137..9602956 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -24,6 +24,7 @@
+ #include <linux/rwsem.h>
+ #include <linux/interrupt.h>
+ #include <linux/idr.h>
++#include <scsi/scsi_host.h>
+ #define MAX_TOPO_LEVEL                6
+diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
+index 00a47d0..ed482765 100644
+--- a/include/linux/usb/renesas_usbhs.h
++++ b/include/linux/usb/renesas_usbhs.h
+@@ -39,7 +39,7 @@ enum {
+  */
+ struct renesas_usbhs_driver_callback {
+       int (*notify_hotplug)(struct platform_device *pdev);
+-};
++} __no_const;
+ /*
+  * callback functions for platform
+diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
+index 9217169..61e5eeb 100644
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -39,7 +39,7 @@ struct user_namespace {
+       struct key              *persistent_keyring_register;
+       struct rw_semaphore     persistent_keyring_register_sem;
+ #endif
+-};
++} __randomize_layout;
+ extern struct user_namespace init_user_ns;
+diff --git a/include/linux/utsname.h b/include/linux/utsname.h
+index 5093f58..c103e58 100644
+--- a/include/linux/utsname.h
++++ b/include/linux/utsname.h
+@@ -25,7 +25,7 @@ struct uts_namespace {
+       struct new_utsname name;
+       struct user_namespace *user_ns;
+       struct ns_common ns;
+-};
++} __randomize_layout;
+ extern struct uts_namespace init_uts_ns;
+ #ifdef CONFIG_UTS_NS
+diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
+index 6f8fbcf..4efc177 100644
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -25,9 +25,42 @@
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
++#ifdef CONFIG_PAX_REFCOUNT
++#define MODULE_PAX_REFCOUNT "REFCOUNT "
++#else
++#define MODULE_PAX_REFCOUNT ""
++#endif
++
++#ifdef CONSTIFY_PLUGIN
++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
++#else
++#define MODULE_CONSTIFY_PLUGIN ""
++#endif
++
++#ifdef STACKLEAK_PLUGIN
++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
++#else
++#define MODULE_STACKLEAK_PLUGIN ""
++#endif
++
++#ifdef RANDSTRUCT_PLUGIN
++#include <generated/randomize_layout_hash.h>
++#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
++#else
++#define MODULE_RANDSTRUCT_PLUGIN
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define MODULE_GRSEC "GRSEC "
++#else
++#define MODULE_GRSEC ""
++#endif
++
+ #define VERMAGIC_STRING                                               \
+       UTS_RELEASE " "                                                 \
+       MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT                     \
+       MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS       \
+-      MODULE_ARCH_VERMAGIC
++      MODULE_ARCH_VERMAGIC                                            \
++      MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
++      MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
+diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
+index 960bedb..1616043 100644
+--- a/include/linux/vga_switcheroo.h
++++ b/include/linux/vga_switcheroo.h
+@@ -170,9 +170,9 @@ enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
+ void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
+-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
++int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
+ void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
+-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
++int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
+ #else
+ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
+@@ -194,9 +194,9 @@ static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct p
+ static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
+-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
++static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
+ static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
+-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
++static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
+ #endif
+ #endif /* _LINUX_VGA_SWITCHEROO_H_ */
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index 3d9d786..b7e5717 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -19,6 +19,14 @@ struct notifier_block;              /* in notifier.h */
+ #define VM_UNINITIALIZED      0x00000020      /* vm_struct is not fully initialized */
+ #define VM_NO_GUARD           0x00000040      /* don't add guard page */
+ #define VM_KASAN              0x00000080      /* has allocated kasan shadow memory */
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++#define VM_KERNEXEC           0x00000100      /* allocate from executable kernel memory range */
++#endif
++
++#define VM_USERCOPY           0x00000200      /* allocation intended for copies to userland */
++
++
+ /* bits [20..32] reserved for arch specific ioremap internals */
+ /*
+@@ -67,7 +75,11 @@ static inline void vmalloc_init(void)
+ }
+ #endif
++#if defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) && defined(CONFIG_X86_64)
++extern void *vzalloc_irq_stack(void);
++#endif
+ extern void *vmalloc(unsigned long size);
++extern void *vmalloc_usercopy(unsigned long size);
+ extern void *vzalloc(unsigned long size);
+ extern void *vmalloc_user(unsigned long size);
+ extern void *vmalloc_node(unsigned long size, int node);
+@@ -87,6 +99,10 @@ extern void *vmap(struct page **pages, unsigned int count,
+                       unsigned long flags, pgprot_t prot);
+ extern void vunmap(const void *addr);
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++extern void unmap_process_stacks(struct task_struct *task);
++#endif
++
+ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+                                      unsigned long uaddr, void *kaddr,
+                                      unsigned long size);
+@@ -151,7 +167,7 @@ extern void free_vm_area(struct vm_struct *area);
+ /* for /dev/kmem */
+ extern long vread(char *buf, char *addr, unsigned long count);
+-extern long vwrite(char *buf, char *addr, unsigned long count);
++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
+ /*
+  *    Internals.  Dont't use..
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index 6137719..f925b2f 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -107,26 +107,26 @@ static inline void vm_events_fold_cpu(int cpu)
+ /*
+  * Zone and node-based page accounting with per cpu differentials.
+  */
+-extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
+-extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
++extern atomic_long_unchecked_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
++extern atomic_long_unchecked_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
+ static inline void zone_page_state_add(long x, struct zone *zone,
+                                enum zone_stat_item item)
+ {
+-      atomic_long_add(x, &zone->vm_stat[item]);
+-      atomic_long_add(x, &vm_zone_stat[item]);
++      atomic_long_add_unchecked(x, &zone->vm_stat[item]);
++      atomic_long_add_unchecked(x, &vm_zone_stat[item]);
+ }
+ static inline void node_page_state_add(long x, struct pglist_data *pgdat,
+                                enum node_stat_item item)
+ {
+-      atomic_long_add(x, &pgdat->vm_stat[item]);
+-      atomic_long_add(x, &vm_node_stat[item]);
++      atomic_long_add_unchecked(x, &pgdat->vm_stat[item]);
++      atomic_long_add_unchecked(x, &vm_node_stat[item]);
+ }
+ static inline unsigned long global_page_state(enum zone_stat_item item)
+ {
+-      long x = atomic_long_read(&vm_zone_stat[item]);
++      long x = atomic_long_read_unchecked(&vm_zone_stat[item]);
+ #ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+@@ -136,7 +136,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
+ static inline unsigned long global_node_page_state(enum node_stat_item item)
+ {
+-      long x = atomic_long_read(&vm_node_stat[item]);
++      long x = atomic_long_read_unchecked(&vm_node_stat[item]);
+ #ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+@@ -144,10 +144,10 @@ static inline unsigned long global_node_page_state(enum node_stat_item item)
+       return x;
+ }
+-static inline unsigned long zone_page_state(struct zone *zone,
++static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
+                                       enum zone_stat_item item)
+ {
+-      long x = atomic_long_read(&zone->vm_stat[item]);
++      long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+@@ -164,7 +164,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
+ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+ {
+-      long x = atomic_long_read(&zone->vm_stat[item]);
++      long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+       int cpu;
+@@ -180,7 +180,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
+                                       enum node_stat_item item)
+ {
+-      long x = atomic_long_read(&pgdat->vm_stat[item]);
++      long x = atomic_long_read_unchecked(&pgdat->vm_stat[item]);
+ #ifdef CONFIG_SMP
+       int cpu;
+@@ -267,26 +267,26 @@ static inline void __mod_node_page_state(struct pglist_data *pgdat,
+ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+-      atomic_long_inc(&zone->vm_stat[item]);
+-      atomic_long_inc(&vm_zone_stat[item]);
++      atomic_long_inc_unchecked(&zone->vm_stat[item]);
++      atomic_long_inc_unchecked(&vm_zone_stat[item]);
+ }
+ static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ {
+-      atomic_long_inc(&pgdat->vm_stat[item]);
+-      atomic_long_inc(&vm_node_stat[item]);
++      atomic_long_inc_unchecked(&pgdat->vm_stat[item]);
++      atomic_long_inc_unchecked(&vm_node_stat[item]);
+ }
+ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+-      atomic_long_dec(&zone->vm_stat[item]);
+-      atomic_long_dec(&vm_zone_stat[item]);
++      atomic_long_dec_unchecked(&zone->vm_stat[item]);
++      atomic_long_dec_unchecked(&vm_zone_stat[item]);
+ }
+ static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ {
+-      atomic_long_dec(&pgdat->vm_stat[item]);
+-      atomic_long_dec(&vm_node_stat[item]);
++      atomic_long_dec_unchecked(&pgdat->vm_stat[item]);
++      atomic_long_dec_unchecked(&vm_node_stat[item]);
+ }
+ static inline void __inc_zone_page_state(struct page *page,
+diff --git a/include/linux/writeback.h b/include/linux/writeback.h
+index fc1e16c..73b1d36 100644
+--- a/include/linux/writeback.h
++++ b/include/linux/writeback.h
+@@ -278,8 +278,9 @@ static inline void inode_detach_wb(struct inode *inode)
+ }
+ static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
++                                             struct inode *inode) __releases(&inode->i_lock);
++static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                              struct inode *inode)
+-      __releases(&inode->i_lock)
+ {
+       spin_unlock(&inode->i_lock);
+ }
+diff --git a/include/linux/xattr.h b/include/linux/xattr.h
+index 94079ba..ae4c218 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -35,7 +35,7 @@ struct xattr_handler {
+       int (*set)(const struct xattr_handler *, struct dentry *dentry,
+                  struct inode *inode, const char *name, const void *buffer,
+                  size_t size, int flags);
+-};
++} __do_const;
+ const char *xattr_full_name(const struct xattr_handler *, const char *);
+@@ -46,6 +46,9 @@ struct xattr {
+ };
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ssize_t pax_getxattr(struct dentry *, void *, size_t);
++#endif
+ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+diff --git a/include/linux/zlib.h b/include/linux/zlib.h
+index 92dbbd3..13ab0b3 100644
+--- a/include/linux/zlib.h
++++ b/include/linux/zlib.h
+@@ -31,6 +31,7 @@
+ #define _ZLIB_H
+ #include <linux/zconf.h>
++#include <linux/compiler.h>
+ /* zlib deflate based on ZLIB_VERSION "1.1.3" */
+ /* zlib inflate based on ZLIB_VERSION "1.2.3" */
+@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
+                         /* basic functions */
+-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
++extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
+ /*
+    Returns the number of bytes that needs to be allocated for a per-
+    stream workspace with the specified parameters.  A pointer to this
+diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
+index a122b1b..bcb7940 100644
+--- a/include/media/v4l2-dev.h
++++ b/include/media/v4l2-dev.h
+@@ -160,7 +160,7 @@ struct v4l2_file_operations {
+       int (*mmap) (struct file *, struct vm_area_struct *);
+       int (*open) (struct file *);
+       int (*release) (struct file *);
+-};
++} __do_const;
+ /*
+  * Newer version of video_device, handled by videodev2.c
+diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
+index a9d6aa4..124a822 100644
+--- a/include/media/v4l2-device.h
++++ b/include/media/v4l2-device.h
+@@ -139,7 +139,7 @@ int __must_check v4l2_device_register(struct device *dev,
+  * then the name will be set to cx18-0 since cx180 would look really odd.
+  */
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+-                       atomic_t *instance);
++                       atomic_unchecked_t *instance);
+ /**
+  * v4l2_device_disconnect - Change V4L2 device state to disconnected.
+diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
+index 5122b5e..598b440 100644
+--- a/include/net/9p/transport.h
++++ b/include/net/9p/transport.h
+@@ -62,7 +62,7 @@ struct p9_trans_module {
+       int (*cancelled)(struct p9_client *, struct p9_req_t *req);
+       int (*zc_request)(struct p9_client *, struct p9_req_t *,
+                         struct iov_iter *, struct iov_iter *, int , int, int);
+-};
++} __do_const;
+ void v9fs_register_trans(struct p9_trans_module *m);
+ void v9fs_unregister_trans(struct p9_trans_module *m);
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index fd60ecc..64e2a1e 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -36,7 +36,7 @@ struct unix_skb_parms {
+       u32                     secid;          /* Security ID          */
+ #endif
+       u32                     consumed;
+-};
++} __randomize_layout;
+ #define UNIXCB(skb)   (*(struct unix_skb_parms *)&((skb)->cb))
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index 5ee3c68..54f883a 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -619,7 +619,7 @@ struct l2cap_ops {
+       struct sk_buff          *(*alloc_skb) (struct l2cap_chan *chan,
+                                              unsigned long hdr_len,
+                                              unsigned long len, int nb);
+-};
++} __do_const;
+ struct l2cap_conn {
+       struct hci_conn         *hcon;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 6360c25..6eb51ef 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -707,7 +707,7 @@ extern struct rtnl_link_ops bond_link_ops;
+ static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+ {
+-      atomic_long_inc(&dev->tx_dropped);
++      atomic_long_inc_unchecked(&dev->tx_dropped);
+       dev_kfree_skb_any(skb);
+ }
+diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
+index f2ae33d..c457cf0 100644
+--- a/include/net/caif/cfctrl.h
++++ b/include/net/caif/cfctrl.h
+@@ -52,7 +52,7 @@ struct cfctrl_rsp {
+       void (*radioset_rsp)(void);
+       void (*reject_rsp)(struct cflayer *layer, u8 linkid,
+                               struct cflayer *client_layer);
+-};
++} __no_const;
+ /* Link Setup Parameters for CAIF-Links. */
+ struct cfctrl_link_param {
+@@ -101,8 +101,8 @@ struct cfctrl_request_info {
+ struct cfctrl {
+       struct cfsrvl serv;
+       struct cfctrl_rsp res;
+-      atomic_t req_seq_no;
+-      atomic_t rsp_seq_no;
++      atomic_unchecked_t req_seq_no;
++      atomic_unchecked_t rsp_seq_no;
+       struct list_head list;
+       /* Protects from simultaneous access to first_req list */
+       spinlock_t info_list_lock;
+diff --git a/include/net/cfg80211-wext.h b/include/net/cfg80211-wext.h
+index 25baddc..f9a1374 100644
+--- a/include/net/cfg80211-wext.h
++++ b/include/net/cfg80211-wext.h
+@@ -22,34 +22,34 @@
+  */
+ int cfg80211_wext_giwname(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        char *name, char *extra);
++                        union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
+-                        u32 *mode, char *extra);
++                        union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info,
+-                        u32 *mode, char *extra);
++                        union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_siwscan(struct net_device *dev,
+                         struct iw_request_info *info,
+                         union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_giwscan(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_point *data, char *extra);
++                        union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_giwrange(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_point *data, char *extra);
++                         union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_siwrts(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *rts, char *extra);
++                       union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_giwrts(struct net_device *dev,
+                        struct iw_request_info *info,
+-                       struct iw_param *rts, char *extra);
++                       union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_siwfrag(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *frag, char *extra);
++                        union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_giwfrag(struct net_device *dev,
+                         struct iw_request_info *info,
+-                        struct iw_param *frag, char *extra);
++                        union iwreq_data *wrqu, char *extra);
+ int cfg80211_wext_giwretry(struct net_device *dev,
+                          struct iw_request_info *info,
+-                         struct iw_param *retry, char *extra);
++                         union iwreq_data *wrqu, char *extra);
+ #endif /* __NET_CFG80211_WEXT_H */
+diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
+index 795ca40..97964b2 100644
+--- a/include/net/cfg802154.h
++++ b/include/net/cfg802154.h
+@@ -354,7 +354,7 @@ struct wpan_dev {
+       /* MAC BSN field */
+       atomic_t bsn;
+       /* MAC DSN field */
+-      atomic_t dsn;
++      atomic_unchecked_t dsn;
+       u8 min_be;
+       u8 max_be;
+diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
+index 456e4a6..32ce9c4 100644
+--- a/include/net/fib_rules.h
++++ b/include/net/fib_rules.h
+@@ -33,8 +33,12 @@ struct fib_rule {
+       struct rcu_head         rcu;
+ };
++typedef struct rt6_info *(*pol_lookup_t)(struct net *,
++                                       struct fib6_table *,
++                                       struct flowi6 *, int);
++
+ struct fib_lookup_arg {
+-      void                    *lookup_ptr;
++      pol_lookup_t            lookup_ptr;
+       void                    *result;
+       struct fib_rule         *rule;
+       u32                     table;
+diff --git a/include/net/flow.h b/include/net/flow.h
+index d47ef4b..ab39dc5 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -243,7 +243,7 @@ void flow_cache_fini(struct net *net);
+ void flow_cache_flush(struct net *net);
+ void flow_cache_flush_deferred(struct net *net);
+-extern atomic_t flow_cache_genid;
++extern atomic_unchecked_t flow_cache_genid;
+ __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index 8d4608c..460372d 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -128,7 +128,7 @@ struct genl_ops {
+       u8                      cmd;
+       u8                      internal_flags;
+       u8                      flags;
+-};
++} __do_const;
+ int __genl_register_family(struct genl_family *family);
+diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
+index d15214d..f6de1b4 100644
+--- a/include/net/gro_cells.h
++++ b/include/net/gro_cells.h
+@@ -25,7 +25,7 @@ static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *sk
+       cell = this_cpu_ptr(gcells->cells);
+       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+-              atomic_long_inc(&dev->rx_dropped);
++              atomic_long_inc_unchecked(&dev->rx_dropped);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 49dcad4..6d2c708 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -65,7 +65,7 @@ struct inet_connection_sock_af_ops {
+       int         (*bind_conflict)(const struct sock *sk,
+                                    const struct inet_bind_bucket *tb, bool relax);
+       void        (*mtu_reduced)(struct sock *sk);
+-};
++} __do_const;
+ /** inet_connection_sock - INET connection oriented sock
+  *
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 236a810..0dae469 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -44,7 +44,7 @@
+ struct ip_options {
+       __be32          faddr;
+       __be32          nexthop;
+-      unsigned char   optlen;
++      unsigned char   optlen __intentional_overflow(0);
+       unsigned char   srr;
+       unsigned char   rr;
+       unsigned char   ts;
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 235c781..160d4a3 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -52,7 +52,7 @@ struct inet_peer {
+        */
+       union {
+               struct {
+-                      atomic_t                        rid;            /* Frag reception counter */
++                      atomic_unchecked_t              rid;            /* Frag reception counter */
+               };
+               struct rcu_head         rcu;
+               struct inet_peer        *gc_next;
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 9742b92..f47d922 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -326,7 +326,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+       return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
+ }
+-u32 ip_idents_reserve(u32 hash, int segs);
++u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
+ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
+ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index fb961a5..754f4432 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -248,10 +248,6 @@ struct fib6_table {
+ #define RT6_TABLE_LOCAL               RT6_TABLE_MAIN
+ #endif
+-typedef struct rt6_info *(*pol_lookup_t)(struct net *,
+-                                       struct fib6_table *,
+-                                       struct flowi6 *, int);
+-
+ /*
+  *    exported functions
+  */
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 7d4a72e..f4ec499 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -175,7 +175,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+ #define FIB_RES_SADDR(net, res)                               \
+       ((FIB_RES_NH(res).nh_saddr_genid ==             \
+-        atomic_read(&(net)->ipv4.dev_addr_genid)) ?   \
++        atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
+        FIB_RES_NH(res).nh_saddr :                     \
+        fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
+ #define FIB_RES_GW(res)                       (FIB_RES_NH(res).nh_gw)
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index cd6018a..996671f 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -543,7 +543,7 @@ struct ip_vs_conn {
+       struct ip_vs_conn       *control;       /* Master control connection */
+       atomic_t                n_control;      /* Number of controlled ones */
+       struct ip_vs_dest       *dest;          /* real server */
+-      atomic_t                in_pkts;        /* incoming packet counter */
++      atomic_unchecked_t      in_pkts;        /* incoming packet counter */
+       /* Packet transmitter for different forwarding methods.  If it
+        * mangles the packet, it must return NF_DROP or better NF_STOLEN,
+@@ -664,7 +664,7 @@ struct ip_vs_dest {
+       __be16                  port;           /* port number of the server */
+       union nf_inet_addr      addr;           /* IP address of the server */
+       volatile unsigned int   flags;          /* dest status flags */
+-      atomic_t                conn_flags;     /* flags to copy to conn */
++      atomic_unchecked_t      conn_flags;     /* flags to copy to conn */
+       atomic_t                weight;         /* server weight */
+       atomic_t                refcnt;         /* reference counter */
+@@ -931,11 +931,11 @@ struct netns_ipvs {
+       /* ip_vs_lblc */
+       int                     sysctl_lblc_expiration;
+       struct ctl_table_header *lblc_ctl_header;
+-      struct ctl_table        *lblc_ctl_table;
++      ctl_table_no_const      *lblc_ctl_table;
+       /* ip_vs_lblcr */
+       int                     sysctl_lblcr_expiration;
+       struct ctl_table_header *lblcr_ctl_header;
+-      struct ctl_table        *lblcr_ctl_table;
++      ctl_table_no_const      *lblcr_ctl_table;
+       /* ip_vs_est */
+       struct list_head        est_list;       /* estimator list */
+       spinlock_t              est_lock;
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 8fed1cd..3ac5db9 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -788,7 +788,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+        * to minimize possbility that any useful information to an
+        * attacker is leaked. Only lower 20 bits are relevant.
+        */
+-      rol32(hash, 16);
++      hash = rol32(hash, 16);
+       flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
+index 8d4f588..2e37ad2 100644
+--- a/include/net/irda/ircomm_tty.h
++++ b/include/net/irda/ircomm_tty.h
+@@ -33,6 +33,7 @@
+ #include <linux/termios.h>
+ #include <linux/timer.h>
+ #include <linux/tty.h>                /* struct tty_struct */
++#include <asm/local.h>
+ #include <net/irda/irias_object.h>
+ #include <net/irda/ircomm_core.h>
+diff --git a/include/net/irda/irias_object.h b/include/net/irda/irias_object.h
+index 83f7808..a925cf8 100644
+--- a/include/net/irda/irias_object.h
++++ b/include/net/irda/irias_object.h
+@@ -83,7 +83,7 @@ void irias_insert_object(struct ias_object *obj);
+ int  irias_delete_object(struct ias_object *obj);
+ int  irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib,
+                        int cleanobject);
+-void __irias_delete_object(struct ias_object *obj);
++void __irias_delete_object(void *_obj);
+ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
+                             int user);
+diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
+index f132924..f80b01d 100644
+--- a/include/net/irda/irlmp.h
++++ b/include/net/irda/irlmp.h
+@@ -194,6 +194,7 @@ struct irlmp_cb {
+ /* Prototype declarations */
+ int  irlmp_init(void);
+ void irlmp_cleanup(void);
++void irlmp_kfree(void *arg);
+ struct lsap_cb *irlmp_open_lsap(__u8 slsap, notify_t *notify, __u8 pid);
+ void irlmp_close_lsap( struct lsap_cb *self);
+diff --git a/include/net/irda/irlmp_event.h b/include/net/irda/irlmp_event.h
+index 9e4ec17..c3247bb 100644
+--- a/include/net/irda/irlmp_event.h
++++ b/include/net/irda/irlmp_event.h
+@@ -82,9 +82,9 @@ typedef enum {
+ extern const char *const irlmp_state[];
+ extern const char *const irlsap_state[];
+-void irlmp_watchdog_timer_expired(void *data);
+-void irlmp_discovery_timer_expired(void *data);
+-void irlmp_idle_timer_expired(void *data);
++void irlmp_watchdog_timer_expired(unsigned long data);
++void irlmp_discovery_timer_expired(unsigned long data);
++void irlmp_idle_timer_expired(unsigned long data);
+ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event, 
+                       struct sk_buff *skb);
+diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
+index cb2615c..8223ae7 100644
+--- a/include/net/irda/timer.h
++++ b/include/net/irda/timer.h
+@@ -72,12 +72,10 @@ struct lap_cb;
+ #define WATCHDOG_TIMEOUT        (20*HZ)       /* 20 sec */
+-typedef void (*TIMER_CALLBACK)(void *);
+-
+ static inline void irda_start_timer(struct timer_list *ptimer, int timeout, 
+-                                  void* data, TIMER_CALLBACK callback)
++                                  void* data, void (*callback)(unsigned long))
+ {
+-      ptimer->function = (void (*)(unsigned long)) callback;
++      ptimer->function = callback;
+       ptimer->data = (unsigned long) data;
+       
+       /* Set new value for timer (update or add timer).
+diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
+index 714cc9a..ea05f3e 100644
+--- a/include/net/iucv/af_iucv.h
++++ b/include/net/iucv/af_iucv.h
+@@ -149,7 +149,7 @@ struct iucv_skb_cb {
+ struct iucv_sock_list {
+       struct hlist_head head;
+       rwlock_t          lock;
+-      atomic_t          autobind_name;
++      atomic_unchecked_t autobind_name;
+ };
+ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
+index f3be818..bf46196 100644
+--- a/include/net/llc_c_ac.h
++++ b/include/net/llc_c_ac.h
+@@ -87,7 +87,7 @@
+ #define LLC_CONN_AC_STOP_SENDACK_TMR                  70
+ #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING  71
+-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+ int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+ int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
+index 3948cf1..83b28c4 100644
+--- a/include/net/llc_c_ev.h
++++ b/include/net/llc_c_ev.h
+@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
+       return (struct llc_conn_state_ev *)skb->cb;
+ }
+-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+ int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+ int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
+index 48f3f89..0e92c50 100644
+--- a/include/net/llc_c_st.h
++++ b/include/net/llc_c_st.h
+@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
+       u8                 next_state;
+       const llc_conn_ev_qfyr_t *ev_qualifiers;
+       const llc_conn_action_t  *ev_actions;
+-};
++} __do_const;
+ struct llc_conn_state {
+       u8                          current_state;
+diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
+index a61b98c..aade1eb 100644
+--- a/include/net/llc_s_ac.h
++++ b/include/net/llc_s_ac.h
+@@ -23,7 +23,7 @@
+ #define SAP_ACT_TEST_IND      9
+ /* All action functions must look like this */
+-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
++typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
+index c4359e2..76dbc4a 100644
+--- a/include/net/llc_s_st.h
++++ b/include/net/llc_s_st.h
+@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
+       llc_sap_ev_t      ev;
+       u8                next_state;
+       const llc_sap_action_t *ev_actions;
+-};
++} __do_const;
+ struct llc_sap_state {
+       u8                         curr_state;
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index cca510a..04adc84 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1567,7 +1567,7 @@ enum ieee80211_key_flags {
+  * @iv_len: The IV length for this key type
+  */
+ struct ieee80211_key_conf {
+-      atomic64_t tx_pn;
++      atomic64_unchecked_t tx_pn;
+       u32 cipher;
+       u8 icv_len;
+       u8 iv_len;
+@@ -5358,7 +5358,7 @@ struct ieee80211_tx_rate_control {
+       struct sk_buff *skb;
+       struct ieee80211_tx_rate reported_rate;
+       bool rts, short_preamble;
+-      u8 max_rate_idx;
++      s8 max_rate_idx;
+       u32 rate_idx_mask;
+       u8 *rate_idx_mcs_mask;
+       bool bss;
+@@ -5395,7 +5395,7 @@ struct rate_control_ops {
+       void (*remove_sta_debugfs)(void *priv, void *priv_sta);
+       u32 (*get_expected_throughput)(void *priv_sta);
+-};
++} __do_const;
+ static inline int rate_supported(struct ieee80211_sta *sta,
+                                enum nl80211_band band,
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 8b68384..48fe40e 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -142,7 +142,7 @@ struct neighbour {
+       unsigned int            arp_queue_len_bytes;
+       struct timer_list       timer;
+       unsigned long           used;
+-      atomic_t                probes;
++      atomic_unchecked_t      probes;
+       __u8                    flags;
+       __u8                    nud_state;
+       __u8                    type;
+@@ -163,7 +163,7 @@ struct neigh_ops {
+       void                    (*error_report)(struct neighbour *, struct sk_buff *);
+       int                     (*output)(struct neighbour *, struct sk_buff *);
+       int                     (*connected_output)(struct neighbour *, struct sk_buff *);
+-};
++} __do_const;
+ struct pneigh_entry {
+       struct pneigh_entry     *next;
+@@ -217,7 +217,7 @@ struct neigh_table {
+       struct neigh_statistics __percpu *stats;
+       struct neigh_hash_table __rcu *nht;
+       struct pneigh_entry     **phash_buckets;
+-};
++} __randomize_layout;
+ enum {
+       NEIGH_ARP_TABLE = 0,
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 0933c74..11d1250 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -53,7 +53,7 @@ struct net {
+                                                */
+       spinlock_t              rules_mod_lock;
+-      atomic64_t              cookie_gen;
++      atomic64_unchecked_t    cookie_gen;
+       struct list_head        list;           /* list of network namespaces */
+       struct list_head        cleanup_list;   /* namespaces on death row */
+@@ -141,8 +141,8 @@ struct net {
+       struct netns_mpls       mpls;
+ #endif
+       struct sock             *diag_nlsk;
+-      atomic_t                fnhe_genid;
+-};
++      atomic_unchecked_t      fnhe_genid;
++} __randomize_layout;
+ #include <linux/seq_file_net.h>
+@@ -277,7 +277,11 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
+ #define __net_init    __init
+ #define __net_exit    __ref
+ #define __net_initdata        __initdata
++#ifdef CONSTIFY_PLUGIN
+ #define __net_initconst       __initconst
++#else
++#define __net_initconst       __initdata
++#endif
+ #endif
+ int peernet2id_alloc(struct net *net, struct net *peer);
+@@ -292,7 +296,7 @@ struct pernet_operations {
+       void (*exit_batch)(struct list_head *net_exit_list);
+       int *id;
+       size_t size;
+-};
++} __do_const;
+ /*
+  * Use these carefully.  If you implement a network device and it
+@@ -340,12 +344,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
+ static inline int rt_genid_ipv4(struct net *net)
+ {
+-      return atomic_read(&net->ipv4.rt_genid);
++      return atomic_read_unchecked(&net->ipv4.rt_genid);
+ }
+ static inline void rt_genid_bump_ipv4(struct net *net)
+ {
+-      atomic_inc(&net->ipv4.rt_genid);
++      atomic_inc_unchecked(&net->ipv4.rt_genid);
+ }
+ extern void (*__fib6_flush_trees)(struct net *net);
+@@ -372,12 +376,12 @@ static inline void rt_genid_bump_all(struct net *net)
+ static inline int fnhe_genid(struct net *net)
+ {
+-      return atomic_read(&net->fnhe_genid);
++      return atomic_read_unchecked(&net->fnhe_genid);
+ }
+ static inline void fnhe_genid_bump(struct net *net)
+ {
+-      atomic_inc(&net->fnhe_genid);
++      atomic_inc_unchecked(&net->fnhe_genid);
+ }
+ #endif /* __NET_NET_NAMESPACE_H */
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 445b019..b776cb2 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -301,7 +301,7 @@ static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+ struct kernel_param;
+-int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
++int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
+ int nf_conntrack_hash_resize(unsigned int hashsize);
+ extern unsigned int nf_conntrack_htable_size;
+ extern unsigned int nf_conntrack_max;
+diff --git a/include/net/netlabel.h b/include/net/netlabel.h
+index efe9806..bec155a 100644
+--- a/include/net/netlabel.h
++++ b/include/net/netlabel.h
+@@ -669,6 +669,7 @@ static inline int netlbl_skbuff_getattr(const struct sk_buff *skb,
+       return -ENOSYS;
+ }
+ static inline void netlbl_skbuff_err(struct sk_buff *skb,
++                                   u16 family,
+                                    int error,
+                                    int gateway)
+ {
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index 254a0fc..040f766 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -532,7 +532,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
+ {
+       if (mark) {
+               WARN_ON((unsigned char *) mark < skb->data);
+-              skb_trim(skb, (unsigned char *) mark - skb->data);
++              skb_trim(skb, (const unsigned char *) mark - skb->data);
+       }
+ }
+diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
+index 38b1a80..c2d91f1 100644
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
+ struct nf_proto_net {
+ #ifdef CONFIG_SYSCTL
+       struct ctl_table_header *ctl_table_header;
+-      struct ctl_table        *ctl_table;
++      ctl_table_no_const      *ctl_table;
+ #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+       struct ctl_table_header *ctl_compat_header;
+-      struct ctl_table        *ctl_compat_table;
++      ctl_table_no_const      *ctl_compat_table;
+ #endif
+ #endif
+       unsigned int            users;
+@@ -60,7 +60,7 @@ struct nf_ip_net {
+       struct nf_icmp_net      icmpv6;
+ #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+       struct ctl_table_header *ctl_table_header;
+-      struct ctl_table        *ctl_table;
++      ctl_table_no_const      *ctl_table;
+ #endif
+ };
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index d061ffe..cc6cdb96 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -119,7 +119,7 @@ struct netns_ipv4 {
+       struct ping_group_range ping_group_range;
+-      atomic_t dev_addr_genid;
++      atomic_unchecked_t dev_addr_genid;
+ #ifdef CONFIG_SYSCTL
+       unsigned long *sysctl_local_reserved_ports;
+@@ -136,6 +136,6 @@ struct netns_ipv4 {
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+       int sysctl_fib_multipath_use_neigh;
+ #endif
+-      atomic_t        rt_genid;
++      atomic_unchecked_t      rt_genid;
+ };
+ #endif
+diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
+index 10d0848..68bc2da 100644
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -83,8 +83,8 @@ struct netns_ipv6 {
+       struct fib_rules_ops    *mr6_rules_ops;
+ #endif
+ #endif
+-      atomic_t                dev_addr_genid;
+-      atomic_t                fib6_sernum;
++      atomic_unchecked_t      dev_addr_genid;
++      atomic_unchecked_t      fib6_sernum;
+ };
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index 24cd394..8310b26 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -78,7 +78,7 @@ struct netns_xfrm {
+       /* flow cache part */
+       struct flow_cache       flow_cache_global;
+-      atomic_t                flow_cache_genid;
++      atomic_unchecked_t      flow_cache_genid;
+       struct list_head        flow_cache_gc_list;
+       atomic_t                flow_cache_gc_count;
+       spinlock_t              flow_cache_gc_lock;
+diff --git a/include/net/ping.h b/include/net/ping.h
+index 4cd90d6..4947311 100644
+--- a/include/net/ping.h
++++ b/include/net/ping.h
+@@ -54,7 +54,7 @@ struct ping_iter_state {
+ extern struct proto ping_prot;
+ #if IS_ENABLED(CONFIG_IPV6)
+-extern struct pingv6_ops pingv6_ops;
++extern struct pingv6_ops *pingv6_ops;
+ #endif
+ struct pingfakehdr {
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index bf36ca3..c29da79 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -49,7 +49,7 @@ struct net_protocol {
+                                * socket lookup?
+                                */
+                               icmp_strict_tag_validation:1;
+-};
++} __do_const;
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol {
+@@ -62,7 +62,7 @@ struct inet6_protocol {
+                              u8 type, u8 code, int offset,
+                              __be32 info);
+       unsigned int    flags;  /* INET6_PROTO_xxx */
+-};
++} __do_const;
+ #define INET6_PROTO_NOPOLICY  0x1
+ #define INET6_PROTO_FINAL     0x2
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 4113916..afa5d60 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -103,7 +103,7 @@ struct rtnl_link_ops {
+       int                     (*fill_linkxstats)(struct sk_buff *skb,
+                                                  const struct net_device *dev,
+                                                  int *prividx, int attr);
+-};
++} __do_const;
+ int __rtnl_link_register(struct rtnl_link_ops *ops);
+ void __rtnl_link_unregister(struct rtnl_link_ops *ops);
+diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
+index 4a5b9a3..ca27d73 100644
+--- a/include/net/sctp/checksum.h
++++ b/include/net/sctp/checksum.h
+@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+                                       unsigned int offset)
+ {
+       struct sctphdr *sh = sctp_hdr(skb);
+-        __le32 ret, old = sh->checksum;
+-      const struct skb_checksum_ops ops = {
++      __le32 ret, old = sh->checksum;
++      static const struct skb_checksum_ops ops = {
+               .update  = sctp_csum_update,
+               .combine = sctp_csum_combine,
+       };
+diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
+index bafe2a0..f27e53c 100644
+--- a/include/net/sctp/sm.h
++++ b/include/net/sctp/sm.h
+@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
+ typedef struct {
+       sctp_state_fn_t *fn;
+       const char *name;
+-} sctp_sm_table_entry_t;
++} __do_const sctp_sm_table_entry_t;
+ /* A naming convention of "sctp_sf_xxx" applies to all the state functions
+  * currently in use.
+@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
+ __u32 sctp_generate_tsn(const struct sctp_endpoint *);
+ /* Extern declarations for major data structures.  */
+-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
++extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+ /* Get the size of a DATA chunk payload. */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index ced0df3..5d02406 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -514,7 +514,7 @@ struct sctp_pf {
+       void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
+       void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
+       struct sctp_af *af;
+-};
++} __do_const;
+ /* Structure to track chunk fragments that have been acked, but peer
+diff --git a/include/net/snmp.h b/include/net/snmp.h
+index c9228ad..5543dfb 100644
+--- a/include/net/snmp.h
++++ b/include/net/snmp.h
+@@ -67,7 +67,7 @@ struct icmp_mib {
+ #define ICMPMSG_MIB_MAX       __ICMPMSG_MIB_MAX
+ struct icmpmsg_mib {
+-      atomic_long_t   mibs[ICMPMSG_MIB_MAX];
++      atomic_long_unchecked_t mibs[ICMPMSG_MIB_MAX];
+ };
+ /* ICMP6 (IPv6-ICMP) */
+@@ -78,17 +78,17 @@ struct icmpv6_mib {
+ };
+ /* per device counters, (shared on all cpus) */
+ struct icmpv6_mib_device {
+-      atomic_long_t   mibs[ICMP6_MIB_MAX];
++      atomic_long_unchecked_t mibs[ICMP6_MIB_MAX];
+ };
+ #define ICMP6MSG_MIB_MAX  __ICMP6MSG_MIB_MAX
+ /* per network ns counters */
+ struct icmpv6msg_mib {
+-      atomic_long_t   mibs[ICMP6MSG_MIB_MAX];
++      atomic_long_unchecked_t mibs[ICMP6MSG_MIB_MAX];
+ };
+ /* per device counters, (shared on all cpus) */
+ struct icmpv6msg_mib_device {
+-      atomic_long_t   mibs[ICMP6MSG_MIB_MAX];
++      atomic_long_unchecked_t mibs[ICMP6MSG_MIB_MAX];
+ };
+@@ -127,7 +127,7 @@ struct linux_xfrm_mib {
+                       __this_cpu_inc(mib->mibs[field])
+ #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)        \
+-                      atomic_long_inc(&mib->mibs[field])
++                      atomic_long_inc_unchecked(&mib->mibs[field])
+ #define SNMP_INC_STATS(mib, field)    \
+                       this_cpu_inc(mib->mibs[field])
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 8741988..ed2e15c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -188,7 +188,7 @@ struct sock_common {
+       struct in6_addr         skc_v6_rcv_saddr;
+ #endif
+-      atomic64_t              skc_cookie;
++      atomic64_unchecked_t    skc_cookie;
+       /* following fields are padding to force
+        * offset(struct sock, sk_refcnt) == 128 on 64bit arches
+@@ -364,7 +364,7 @@ struct sock {
+       unsigned int            sk_napi_id;
+       unsigned int            sk_ll_usec;
+ #endif
+-      atomic_t                sk_drops;
++      atomic_unchecked_t      sk_drops;
+       int                     sk_rcvbuf;
+       struct sk_filter __rcu  *sk_filter;
+@@ -1069,7 +1069,7 @@ struct proto {
+       atomic_t                socks;
+ #endif
+       int                     (*diag_destroy)(struct sock *sk, int err);
+-};
++} __randomize_layout;
+ int proto_register(struct proto *prot, int alloc_slab);
+ void proto_unregister(struct proto *prot);
+@@ -1156,7 +1156,7 @@ static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+       return sk->sk_prot->sysctl_mem[index];
+ }
+-static inline long
++static inline long __intentional_overflow(-1)
+ sk_memory_allocated(const struct sock *sk)
+ {
+       return atomic_long_read(sk->sk_prot->memory_allocated);
+@@ -1769,7 +1769,7 @@ static inline bool sk_check_csum_caps(struct sock *sk)
+ }
+ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+-                                         struct iov_iter *from, char *to,
++                                         struct iov_iter *from, unsigned char *to,
+                                          int copy, int offset)
+ {
+       if (skb->ip_summed == CHECKSUM_NONE) {
+@@ -2023,7 +2023,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+       }
+ }
+-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
++struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+                                   bool force_schedule);
+ /**
+@@ -2099,14 +2099,14 @@ struct sock_skb_cb {
+ static inline void
+ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+ {
+-      SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
++      SOCK_SKB_CB(skb)->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ }
+ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+ {
+       int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+-      atomic_add(segs, &sk->sk_drops);
++      atomic_add_unchecked(segs, &sk->sk_drops);
+ }
+ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 7717302..a633d63 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -543,7 +543,7 @@ void tcp_retransmit_timer(struct sock *sk);
+ void tcp_xmit_retransmit_queue(struct sock *);
+ void tcp_simple_retransmit(struct sock *);
+ int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
++int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t) __intentional_overflow(3);
+ void tcp_send_probe0(struct sock *);
+ void tcp_send_partial(struct sock *);
+@@ -732,8 +732,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
+  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
+  */
+ struct tcp_skb_cb {
+-      __u32           seq;            /* Starting sequence number     */
+-      __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
++      __u32           seq __intentional_overflow(-1); /* Starting sequence number     */
++      __u32           end_seq __intentional_overflow(-1);     /* SEQ + FIN + SYN + datalen    */
+       union {
+               /* Note : tcp_tw_isn is used in input path only
+                *        (isn chosen by tcp_timewait_state_process())
+@@ -763,7 +763,7 @@ struct tcp_skb_cb {
+       __u8            txstamp_ack:1,  /* Record TX timestamp for ack? */
+                       eor:1,          /* Is skb MSG_EOR marked? */
+                       unused:6;
+-      __u32           ack_seq;        /* Sequence number ACK'd        */
++      __u32           ack_seq __intentional_overflow(-1);     /* Sequence number ACK'd        */
+       union {
+               struct {
+                       /* There is space for up to 20 bytes */
+@@ -1872,7 +1872,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
+  */
+ static inline void tcp_listendrop(const struct sock *sk)
+ {
+-      atomic_inc(&((struct sock *)sk)->sk_drops);
++      atomic_inc_unchecked(&((struct sock *)sk)->sk_drops);
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ }
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 1793431..2feaff28 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -280,7 +280,6 @@ struct xfrm_dst;
+ struct xfrm_policy_afinfo {
+       unsigned short          family;
+       struct dst_ops          *dst_ops;
+-      void                    (*garbage_collect)(struct net *net);
+       struct dst_entry        *(*dst_lookup)(struct net *net,
+                                              int tos, int oif,
+                                              const xfrm_address_t *saddr,
+@@ -299,7 +298,7 @@ struct xfrm_policy_afinfo {
+                                           struct net_device *dev,
+                                           const struct flowi *fl);
+       struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
+-};
++} __do_const;
+ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+@@ -338,7 +337,7 @@ struct xfrm_state_afinfo {
+       int                     (*transport_finish)(struct sk_buff *skb,
+                                                   int async);
+       void                    (*local_error)(struct sk_buff *skb, u32 mtu);
+-};
++} __do_const;
+ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+@@ -433,7 +432,7 @@ struct xfrm_mode {
+       struct module *owner;
+       unsigned int encap;
+       int flags;
+-};
++} __do_const;
+ /* Flags for xfrm_mode. */
+ enum {
+@@ -528,7 +527,7 @@ struct xfrm_policy {
+       struct timer_list       timer;
+       struct flow_cache_object flo;
+-      atomic_t                genid;
++      atomic_unchecked_t      genid;
+       u32                     priority;
+       u32                     index;
+       struct xfrm_mark        mark;
+@@ -599,7 +598,7 @@ struct xfrm_mgr {
+                                          int num_bundles,
+                                          const struct xfrm_kmaddress *k);
+       bool                    (*is_alive)(const struct km_event *c);
+-};
++} __do_const;
+ int xfrm_register_km(struct xfrm_mgr *km);
+ int xfrm_unregister_km(struct xfrm_mgr *km);
+@@ -1168,6 +1167,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
+ }
+ void xfrm_garbage_collect(struct net *net);
++void xfrm_garbage_collect_deferred(struct net *net);
+ #else
+@@ -1206,6 +1206,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
+ static inline void xfrm_garbage_collect(struct net *net)
+ {
+ }
++static inline void xfrm_garbage_collect_deferred(struct net *net)
++{
++}
+ #endif
+ static __inline__
+diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
+index 92a7d85..1779570 100644
+--- a/include/rdma/ib_cm.h
++++ b/include/rdma/ib_cm.h
+@@ -486,8 +486,8 @@ int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
+  * @private_data_len: Size of the private data buffer, in bytes.
+  */
+ int ib_send_cm_rej(struct ib_cm_id *cm_id,
+-                 enum ib_cm_rej_reason reason,
+-                 void *ari,
++                 int reason,
++                 const void *ari,
+                  u8 ari_length,
+                  const void *private_data,
+                  u8 private_data_len);
+@@ -558,8 +558,8 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
+  * @private_data_len: Size of the private data buffer, in bytes.
+  */
+ int ib_send_cm_apr(struct ib_cm_id *cm_id,
+-                 enum ib_cm_apr_status status,
+-                 void *info,
++                 int status,
++                 const void *info,
+                  u8 info_length,
+                  const void *private_data,
+                  u8 private_data_len);
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index e1f9673..138c39f 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -1190,7 +1190,7 @@ struct ib_sge {
+ struct ib_cqe {
+       void (*done)(struct ib_cq *cq, struct ib_wc *wc);
+-};
++} __no_const;
+ struct ib_send_wr {
+       struct ib_send_wr      *next;
+diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
+index 7428a53..9d6aaef 100644
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -771,6 +771,7 @@ struct libfc_function_template {
+        */
+       void (*disc_stop_final) (struct fc_lport *);
+ };
++typedef struct libfc_function_template __no_const libfc_function_template_no_const;
+ /**
+  * struct fc_disc - Discovery context
+@@ -875,7 +876,7 @@ struct fc_lport {
+       struct fc_vport                *vport;
+       /* Operational Information */
+-      struct libfc_function_template tt;
++      libfc_function_template_no_const tt;
+       u8                             link_up;
+       u8                             qfull;
+       u16                            vlan;
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 8a95631..bd0f3e5 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -193,9 +193,9 @@ struct scsi_device {
+       unsigned int max_device_blocked; /* what device_blocked counts down from  */
+ #define SCSI_DEFAULT_DEVICE_BLOCKED   3
+-      atomic_t iorequest_cnt;
+-      atomic_t iodone_cnt;
+-      atomic_t ioerr_cnt;
++      atomic_unchecked_t iorequest_cnt;
++      atomic_unchecked_t iodone_cnt;
++      atomic_unchecked_t ioerr_cnt;
+       struct device           sdev_gendev,
+                               sdev_dev;
+diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
+index 891a658..fcd68df 100644
+--- a/include/scsi/scsi_driver.h
++++ b/include/scsi/scsi_driver.h
+@@ -14,7 +14,7 @@ struct scsi_driver {
+       void (*rescan)(struct device *);
+       int (*init_command)(struct scsi_cmnd *);
+       void (*uninit_command)(struct scsi_cmnd *);
+-      int (*done)(struct scsi_cmnd *);
++      unsigned int (*done)(struct scsi_cmnd *);
+       int (*eh_action)(struct scsi_cmnd *, int);
+ };
+ #define to_scsi_driver(drv) \
+diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
+index bf66ea6..1c719d83 100644
+--- a/include/scsi/scsi_transport_fc.h
++++ b/include/scsi/scsi_transport_fc.h
+@@ -758,7 +758,8 @@ struct fc_function_template {
+       unsigned long   show_host_system_hostname:1;
+       unsigned long   disable_target_scan:1;
+-};
++} __do_const;
++typedef struct fc_function_template __no_const fc_function_template_no_const;
+ /**
+diff --git a/include/scsi/sg.h b/include/scsi/sg.h
+index 3afec70..b196b43 100644
+--- a/include/scsi/sg.h
++++ b/include/scsi/sg.h
+@@ -52,7 +52,7 @@ typedef struct sg_io_hdr
+                                             or scatter gather list */
+     unsigned char __user *cmdp; /* [i], [*i] points to command to perform */
+     void __user *sbp;         /* [i], [*o] points to sense_buffer memory */
+-    unsigned int timeout;       /* [i] MAX_UINT->no timeout (unit: millisec) */
++    unsigned int timeout __intentional_overflow(-1);       /* [i] MAX_UINT->no timeout (unit: millisec) */
+     unsigned int flags;         /* [i] 0 -> default, see SG_FLAG... */
+     int pack_id;                /* [i->o] unused internally (normally) */
+     void __user * usr_ptr;      /* [i->o] unused internally */
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index cee8c00..0ee1834 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -132,7 +132,7 @@ struct snd_compr_ops {
+                       struct snd_compr_caps *caps);
+       int (*get_codec_caps) (struct snd_compr_stream *stream,
+                       struct snd_compr_codec_caps *codec);
+-};
++} __no_const;
+ /**
+  * struct snd_compr: Compressed device
+diff --git a/include/sound/control.h b/include/sound/control.h
+index 21d047f..9573462 100644
+--- a/include/sound/control.h
++++ b/include/sound/control.h
+@@ -214,8 +214,10 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
+  * Return: Zero if successful or a negative error code.
+  */
+ static inline int
+-snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
++snd_ctl_add_slave(void *_master, struct snd_kcontrol *slave)
+ {
++      struct snd_kcontrol *master = _master;
++
+       return _snd_ctl_add_slave(master, slave, 0);
+ }
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index af1fb37..0432863 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -1075,7 +1075,7 @@ int snd_pcm_update_state(struct snd_pcm_substream *substream,
+                        struct snd_pcm_runtime *runtime);
+ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream);
+ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr);
+-void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
++void snd_pcm_period_elapsed(void *_substream);
+ snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream,
+                                   const void __user *buf,
+                                   snd_pcm_uframes_t frames);
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index f730b91..0079544 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -159,8 +159,7 @@ void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
+ /* callbacks */
+-int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
+-                      const unsigned char *buffer, int count);
++int snd_rawmidi_receive(void *_substream, const void *_buffer, int count);
+ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream);
+ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+                             unsigned char *buffer, int count);
+diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
+index feb58d4..9ce81c1 100644
+--- a/include/sound/seq_kernel.h
++++ b/include/sound/seq_kernel.h
+@@ -80,7 +80,7 @@ int snd_seq_kernel_client_ctl(int client, unsigned int cmd, void *arg);
+ #define SNDRV_SEQ_EXT_USRPTR  0x80000000
+ #define SNDRV_SEQ_EXT_CHAINED 0x40000000
+-typedef int (*snd_seq_dump_func_t)(void *ptr, void *buf, int count);
++typedef int (*snd_seq_dump_func_t)(void *ptr, const void *buf, int count);
+ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
+                            int in_kernel, int size_aligned);
+ int snd_seq_dump_var_event(const struct snd_seq_event *event,
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 6144882..abe63c1 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -931,7 +931,7 @@ struct snd_soc_codec_driver {
+                            enum snd_soc_dapm_type, int);
+       bool ignore_pmdown_time;  /* Doesn't benefit from pmdown delay */
+-};
++} __do_const;
+ /* SoC platform interface */
+ struct snd_soc_platform_driver {
+@@ -958,7 +958,7 @@ struct snd_soc_platform_driver {
+       const struct snd_compr_ops *compr_ops;
+       int (*bespoke_trigger)(struct snd_pcm_substream *, int);
+-};
++} __do_const;
+ struct snd_soc_dai_link_component {
+       const char *name;
+diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
+new file mode 100644
+index 0000000..fb634b7
+--- /dev/null
++++ b/include/trace/events/fs.h
+@@ -0,0 +1,53 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM fs
++
++#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_FS_H
++
++#include <linux/fs.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(do_sys_open,
++
++      TP_PROTO(const char *filename, int flags, int mode),
++
++      TP_ARGS(filename, flags, mode),
++
++      TP_STRUCT__entry(
++              __string(       filename, filename              )
++              __field(        int, flags                      )
++              __field(        int, mode                       )
++      ),
++
++      TP_fast_assign(
++              __assign_str(filename, filename);
++              __entry->flags = flags;
++              __entry->mode = mode;
++      ),
++
++      TP_printk("\"%s\" %x %o",
++                __get_str(filename), __entry->flags, __entry->mode)
++);
++
++TRACE_EVENT(open_exec,
++
++      TP_PROTO(const char *filename),
++
++      TP_ARGS(filename),
++
++      TP_STRUCT__entry(
++              __string(       filename, filename              )
++      ),
++
++      TP_fast_assign(
++              __assign_str(filename, filename);
++      ),
++
++      TP_printk("\"%s\"",
++                __get_str(filename))
++);
++
++#endif /* _TRACE_FS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
+index f95f25e..87ed448 100644
+--- a/include/trace/events/irq.h
++++ b/include/trace/events/irq.h
+@@ -51,7 +51,7 @@ SOFTIRQ_NAME_LIST
+  */
+ TRACE_EVENT(irq_handler_entry,
+-      TP_PROTO(int irq, struct irqaction *action),
++      TP_PROTO(int irq, const struct irqaction *action),
+       TP_ARGS(irq, action),
+@@ -81,7 +81,7 @@ TRACE_EVENT(irq_handler_entry,
+  */
+ TRACE_EVENT(irq_handler_exit,
+-      TP_PROTO(int irq, struct irqaction *action, int ret),
++      TP_PROTO(int irq, const struct irqaction *action, int ret),
+       TP_ARGS(irq, action, ret),
+diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
+index 5a81ab4..7b68dc7 100644
+--- a/include/trace/events/mmflags.h
++++ b/include/trace/events/mmflags.h
+@@ -135,6 +135,12 @@ IF_HAVE_PG_IDLE(PG_idle,          "idle"          )
+ #define IF_HAVE_VM_SOFTDIRTY(flag,name)
+ #endif
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++#define IF_HAVE_VM_PAGEEXEC(flag,name) {flag, name },
++#else
++#define IF_HAVE_VM_PAGEEXEC(flag,name)
++#endif
++
+ #define __def_vmaflag_names                                           \
+       {VM_READ,                       "read"          },              \
+       {VM_WRITE,                      "write"         },              \
+@@ -159,6 +165,7 @@ IF_HAVE_PG_IDLE(PG_idle,           "idle"          )
+       {VM_ACCOUNT,                    "account"       },              \
+       {VM_NORESERVE,                  "noreserve"     },              \
+       {VM_HUGETLB,                    "hugetlb"       },              \
++IF_HAVE_VM_PAGEEXEC(VM_PAGEEXEC,      "pageexec"      )               \
+       __VM_ARCH_SPECIFIC_1                            ,               \
+       __VM_ARCH_SPECIFIC_2                            ,               \
+       {VM_DONTDUMP,                   "dontdump"      },              \
+diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
+index 7caf44c..23c6f27 100644
+--- a/include/uapi/linux/a.out.h
++++ b/include/uapi/linux/a.out.h
+@@ -39,6 +39,14 @@ enum machine_type {
+   M_MIPS2 = 152               /* MIPS R6000/R4000 binary */
+ };
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC        1       /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP        2       /* Emulate trampolines */
++#define F_PAX_MPROTECT        4       /* Restrict mprotect() */
++#define F_PAX_RANDMMAP        8       /* Randomize mmap() base */
++/*#define F_PAX_RANDEXEC      16*/    /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC        32      /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
+index 22b6ad3..aeba37e 100644
+--- a/include/uapi/linux/bcache.h
++++ b/include/uapi/linux/bcache.h
+@@ -5,6 +5,7 @@
+  * Bcache on disk data structures
+  */
++#include <linux/compiler.h>
+ #include <asm/types.h>
+ #define BITMASK(name, type, field, offset, size)              \
+@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v)                      \
+ /* Btree keys - all units are in sectors */
+ struct bkey {
+-      __u64   high;
+-      __u64   low;
++      __u64   high __intentional_overflow(-1);
++      __u64   low __intentional_overflow(-1);
+       __u64   ptr[];
+ };
+diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
+index 4b93f2b..ffa1302 100644
+--- a/include/uapi/linux/byteorder/little_endian.h
++++ b/include/uapi/linux/byteorder/little_endian.h
+@@ -42,51 +42,51 @@
+ static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
+ {
+-      return (__force __le64)*p;
++      return (__force const __le64)*p;
+ }
+-static __always_inline __u64 __le64_to_cpup(const __le64 *p)
++static __always_inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
+ {
+-      return (__force __u64)*p;
++      return (__force const __u64)*p;
+ }
+ static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
+ {
+-      return (__force __le32)*p;
++      return (__force const __le32)*p;
+ }
+ static __always_inline __u32 __le32_to_cpup(const __le32 *p)
+ {
+-      return (__force __u32)*p;
++      return (__force const __u32)*p;
+ }
+ static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
+ {
+-      return (__force __le16)*p;
++      return (__force const __le16)*p;
+ }
+ static __always_inline __u16 __le16_to_cpup(const __le16 *p)
+ {
+-      return (__force __u16)*p;
++      return (__force const __u16)*p;
+ }
+ static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
+ {
+-      return (__force __be64)__swab64p(p);
++      return (__force const __be64)__swab64p(p);
+ }
+ static __always_inline __u64 __be64_to_cpup(const __be64 *p)
+ {
+-      return __swab64p((__u64 *)p);
++      return __swab64p((const __u64 *)p);
+ }
+ static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
+ {
+-      return (__force __be32)__swab32p(p);
++      return (__force const __be32)__swab32p(p);
+ }
+-static __always_inline __u32 __be32_to_cpup(const __be32 *p)
++static __always_inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
+ {
+-      return __swab32p((__u32 *)p);
++      return __swab32p((const __u32 *)p);
+ }
+ static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
+ {
+-      return (__force __be16)__swab16p(p);
++      return (__force const __be16)__swab16p(p);
+ }
+ static __always_inline __u16 __be16_to_cpup(const __be16 *p)
+ {
+-      return __swab16p((__u16 *)p);
++      return __swab16p((const __u16 *)p);
+ }
+ #define __cpu_to_le64s(x) do { (void)(x); } while (0)
+ #define __le64_to_cpus(x) do { (void)(x); } while (0)
+diff --git a/include/uapi/linux/connector.h b/include/uapi/linux/connector.h
+index 4cb2835..cfbc4e2 100644
+--- a/include/uapi/linux/connector.h
++++ b/include/uapi/linux/connector.h
+@@ -69,7 +69,7 @@ struct cb_id {
+ struct cn_msg {
+       struct cb_id id;
+-      __u32 seq;
++      __u32 seq __intentional_overflow(-1);
+       __u32 ack;
+       __u16 len;              /* Length of the following data */
+diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
+index b59ee07..acfaf4ca 100644
+--- a/include/uapi/linux/elf.h
++++ b/include/uapi/linux/elf.h
+@@ -37,6 +37,17 @@ typedef __s64       Elf64_Sxword;
+ #define PT_GNU_EH_FRAME               0x6474e550
+ #define PT_GNU_STACK  (PT_LOOS + 0x474e551)
++#define PT_GNU_RELRO  (PT_LOOS + 0x474e552)
++
++#define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC               1       /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP               2       /* Emulate trampolines */
++#define EF_PAX_MPROTECT               4       /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP               8       /* Randomize mmap() base */
++/*#define EF_PAX_RANDEXEC             16*/    /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC               32      /* Segmentation based non-executable pages */
+ /*
+  * Extended Numbering
+@@ -94,6 +105,8 @@ typedef __s64       Elf64_Sxword;
+ #define DT_DEBUG      21
+ #define DT_TEXTREL    22
+ #define DT_JMPREL     23
++#define DT_FLAGS      30
++  #define DF_TEXTREL  0x00000004
+ #define DT_ENCODING   32
+ #define OLD_DT_LOOS   0x60000000
+ #define DT_LOOS               0x6000000d
+@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
+ #define PF_W          0x2
+ #define PF_X          0x1
++#define PF_PAGEEXEC   (1U << 4)       /* Enable  PAGEEXEC */
++#define PF_NOPAGEEXEC (1U << 5)       /* Disable PAGEEXEC */
++#define PF_SEGMEXEC   (1U << 6)       /* Enable  SEGMEXEC */
++#define PF_NOSEGMEXEC (1U << 7)       /* Disable SEGMEXEC */
++#define PF_MPROTECT   (1U << 8)       /* Enable  MPROTECT */
++#define PF_NOMPROTECT (1U << 9)       /* Disable MPROTECT */
++/*#define PF_RANDEXEC (1U << 10)*/    /* Enable  RANDEXEC */
++/*#define PF_NORANDEXEC       (1U << 11)*/    /* Disable RANDEXEC */
++#define PF_EMUTRAMP   (1U << 12)      /* Enable  EMUTRAMP */
++#define PF_NOEMUTRAMP (1U << 13)      /* Disable EMUTRAMP */
++#define PF_RANDMMAP   (1U << 14)      /* Enable  RANDMMAP */
++#define PF_NORANDMMAP (1U << 15)      /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+   Elf32_Word  p_type;
+   Elf32_Off   p_offset;
+@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
+ #define       EI_OSABI        7
+ #define       EI_PAD          8
++#define       EI_PAX          14
++
+ #define       ELFMAG0         0x7f            /* EI_MAG */
+ #define       ELFMAG1         'E'
+ #define       ELFMAG2         'L'
+diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
+index aa169c4..6a2771d 100644
+--- a/include/uapi/linux/personality.h
++++ b/include/uapi/linux/personality.h
+@@ -30,6 +30,7 @@ enum {
+ #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC  | \
+                           ADDR_NO_RANDOMIZE  | \
+                           ADDR_COMPAT_LAYOUT | \
++                          ADDR_LIMIT_3GB     | \
+                           MMAP_PAGE_ZERO)
+ /*
+diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
+index 8b8d39d..1ca6c07 100644
+--- a/include/uapi/linux/screen_info.h
++++ b/include/uapi/linux/screen_info.h
+@@ -44,7 +44,7 @@ struct screen_info {
+       __u16 vesa_attributes;  /* 0x34 */
+       __u32 capabilities;     /* 0x36 */
+       __u32 ext_lfb_base;     /* 0x3a */
+-      __u8  _reserved[2];     /* 0x3e */
++      __u16 vesapm_size;      /* 0x3e */
+ } __attribute__((packed));
+ #define VIDEO_TYPE_MDA                0x10    /* Monochrome Text Display      */
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 8f3a8f6..736a542 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -43,7 +43,7 @@
+  * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+  */
+-static inline __attribute_const__ __u16 __fswab16(__u16 val)
++static inline __intentional_overflow(0) __attribute_const__ __u16 __fswab16(__u16 val)
+ {
+ #if defined (__arch_swab16)
+       return __arch_swab16(val);
+@@ -52,7 +52,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
+ #endif
+ }
+-static inline __attribute_const__ __u32 __fswab32(__u32 val)
++static inline __intentional_overflow(0) __attribute_const__ __u32 __fswab32(__u32 val)
+ {
+ #if defined(__arch_swab32)
+       return __arch_swab32(val);
+@@ -61,7 +61,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
+ #endif
+ }
+-static inline __attribute_const__ __u64 __fswab64(__u64 val)
++static inline __intentional_overflow(0) __attribute_const__ __u64 __fswab64(__u64 val)
+ {
+ #if defined (__arch_swab64)
+       return __arch_swab64(val);
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index 1590c49..6977e11 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -73,5 +73,10 @@
+ #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
++/* User namespace */
++#define XATTR_PAX_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_USER_PAX_FLAGS XATTR_USER_PREFIX XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/include/video/udlfb.h b/include/video/udlfb.h
+index f9466fa..f4e2b81 100644
+--- a/include/video/udlfb.h
++++ b/include/video/udlfb.h
+@@ -53,10 +53,10 @@ struct dlfb_data {
+       u32 pseudo_palette[256];
+       int blank_mode; /*one of FB_BLANK_ */
+       /* blit-only rendering path metrics, exposed through sysfs */
+-      atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+-      atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+-      atomic_t bytes_sent; /* to usb, after compression including overhead */
+-      atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++      atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
++      atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
++      atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
++      atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
+ };
+ #define NR_USB_REQUEST_I2C_SUB_IO 0x02
+diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
+index 30f5362..8ed8ac9 100644
+--- a/include/video/uvesafb.h
++++ b/include/video/uvesafb.h
+@@ -122,6 +122,7 @@ struct uvesafb_par {
+       u8 ypan;                        /* 0 - nothing, 1 - ypan, 2 - ywrap */
+       u8 pmi_setpal;                  /* PMI for palette changes */
+       u16 *pmi_base;                  /* protected mode interface location */
++      u8 *pmi_code;                   /* protected mode code location */
+       void *pmi_start;
+       void *pmi_pal;
+       u8 *vbe_state_orig;             /*
+diff --git a/init/Kconfig b/init/Kconfig
+index cac3f09..fcf4fa4 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -31,6 +31,9 @@ menu "General setup"
+ config BROKEN
+       bool
++config BROKEN_SECURITY
++      bool
++
+ config BROKEN_ON_SMP
+       bool
+       depends on BROKEN || !SMP
+@@ -288,7 +291,8 @@ config FHANDLE
+ config USELIB
+       bool "uselib syscall"
+-      def_bool ALPHA || M68K || SPARC || X86_32 || IA32_EMULATION
++      default n
++      depends on !GRKERNSEC
+       help
+         This option enables the uselib syscall, a system call used in the
+         dynamic linker from libc5 and earlier.  glibc does not use this
+@@ -632,6 +636,7 @@ config RCU_FAST_NO_HZ
+ config TREE_RCU_TRACE
+       def_bool RCU_TRACE && ( TREE_RCU || PREEMPT_RCU )
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       help
+         This option provides tracing for the TREE_RCU and
+         PREEMPT_RCU implementations, permitting Makefile to
+@@ -1158,6 +1163,7 @@ endif # CGROUPS
+ config CHECKPOINT_RESTORE
+       bool "Checkpoint/restore support" if EXPERT
+       select PROC_CHILDREN
++      depends on !GRKERNSEC
+       default n
+       help
+         Enables additional kernel features in a sake of checkpoint/restore.
+@@ -1630,7 +1636,7 @@ config ADVISE_SYSCALLS
+ config USERFAULTFD
+       bool "Enable userfaultfd() system call"
+       select ANON_INODES
+-      depends on MMU
++      depends on MMU && !GRKERNSEC
+       help
+         Enable the userfaultfd() system call that allows to intercept and
+         handle page faults in userland.
+@@ -1743,7 +1749,7 @@ config SLUB_DEBUG
+ config COMPAT_BRK
+       bool "Disable heap randomization"
+-      default y
++      default n
+       help
+         Randomizing heap placement makes heap exploits harder, but it
+         also breaks ancient binaries (including anything libc5 based).
+@@ -1761,7 +1767,6 @@ choice
+ config SLAB
+       bool "SLAB"
+-      select HAVE_HARDENED_USERCOPY_ALLOCATOR
+       help
+         The regular slab allocator that is established and known to work
+         well in all environments. It organizes cache hot objects in
+@@ -1769,7 +1774,6 @@ config SLAB
+ config SLUB
+       bool "SLUB (Unqueued Allocator)"
+-      select HAVE_HARDENED_USERCOPY_ALLOCATOR
+       help
+          SLUB is a slab allocator that minimizes cache line usage
+          instead of managing queues of cached objects (SLAB approach).
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index dea5de9..497f996 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -363,11 +363,11 @@ static void __init get_fs_names(char *page)
+ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+ {
+       struct super_block *s;
+-      int err = sys_mount(name, "/root", fs, flags, data);
++      int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
+       if (err)
+               return err;
+-      sys_chdir("/root");
++      sys_chdir((const char __force_user *)"/root");
+       s = current->fs->pwd.dentry->d_sb;
+       ROOT_DEV = s->s_dev;
+       printk(KERN_INFO
+@@ -490,18 +490,18 @@ void __init change_floppy(char *fmt, ...)
+       va_start(args, fmt);
+       vsprintf(buf, fmt, args);
+       va_end(args);
+-      fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++      fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, FDEJECT, 0);
+               sys_close(fd);
+       }
+       printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
+-      fd = sys_open("/dev/console", O_RDWR, 0);
++      fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, TCGETS, (long)&termios);
+               termios.c_lflag &= ~ICANON;
+               sys_ioctl(fd, TCSETSF, (long)&termios);
+-              sys_read(fd, &c, 1);
++              sys_read(fd, (char __user *)&c, 1);
+               termios.c_lflag |= ICANON;
+               sys_ioctl(fd, TCSETSF, (long)&termios);
+               sys_close(fd);
+@@ -600,8 +600,8 @@ void __init prepare_namespace(void)
+       mount_root();
+ out:
+       devtmpfs_mount("dev");
+-      sys_mount(".", "/", NULL, MS_MOVE, NULL);
+-      sys_chroot(".");
++      sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++      sys_chroot((const char __force_user *)".");
+ }
+ static bool is_tmpfs;
+@@ -609,7 +609,7 @@ static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+       int flags, const char *dev_name, void *data)
+ {
+       static unsigned long once;
+-      void *fill = ramfs_fill_super;
++      int (*fill)(struct super_block *, void *, int) = ramfs_fill_super;
+       if (test_and_set_bit(0, &once))
+               return ERR_PTR(-ENODEV);
+diff --git a/init/do_mounts.h b/init/do_mounts.h
+index 067af1d..b535547 100644
+--- a/init/do_mounts.h
++++ b/init/do_mounts.h
+@@ -15,15 +15,15 @@ extern int root_mountflags;
+ static inline int create_dev(char *name, dev_t dev)
+ {
+-      sys_unlink(name);
+-      return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
++      sys_unlink((char __force_user *)name);
++      return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
+ }
+ #if BITS_PER_LONG == 32
+ static inline u32 bstat(char *name)
+ {
+       struct stat64 stat;
+-      if (sys_stat64(name, &stat) != 0)
++      if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
+               return 0;
+       if (!S_ISBLK(stat.st_mode))
+               return 0;
+@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
+ static inline u32 bstat(char *name)
+ {
+       struct stat stat;
+-      if (sys_newstat(name, &stat) != 0)
++      if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
+               return 0;
+       if (!S_ISBLK(stat.st_mode))
+               return 0;
+diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
+index a1000ca..3137150 100644
+--- a/init/do_mounts_initrd.c
++++ b/init/do_mounts_initrd.c
+@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
+ {
+       sys_unshare(CLONE_FS | CLONE_FILES);
+       /* stdin/stdout/stderr for /linuxrc */
+-      sys_open("/dev/console", O_RDWR, 0);
++      sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
+       sys_dup(0);
+       sys_dup(0);
+       /* move initrd over / and chdir/chroot in initrd root */
+-      sys_chdir("/root");
+-      sys_mount(".", "/", NULL, MS_MOVE, NULL);
+-      sys_chroot(".");
++      sys_chdir((const char __force_user *)"/root");
++      sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++      sys_chroot((const char __force_user *)".");
+       sys_setsid();
+       return 0;
+ }
+@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
+       create_dev("/dev/root.old", Root_RAM0);
+       /* mount initrd on rootfs' /root */
+       mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
+-      sys_mkdir("/old", 0700);
+-      sys_chdir("/old");
++      sys_mkdir((const char __force_user *)"/old", 0700);
++      sys_chdir((const char __force_user *)"/old");
+       /* try loading default modules from initrd */
+       load_default_modules();
+@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
+       current->flags &= ~PF_FREEZER_SKIP;
+       /* move initrd to rootfs' /old */
+-      sys_mount("..", ".", NULL, MS_MOVE, NULL);
++      sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
+       /* switch root and cwd back to / of rootfs */
+-      sys_chroot("..");
++      sys_chroot((const char __force_user *)"..");
+       if (new_decode_dev(real_root_dev) == Root_RAM0) {
+-              sys_chdir("/old");
++              sys_chdir((const char __force_user *)"/old");
+               return;
+       }
+-      sys_chdir("/");
++      sys_chdir((const char __force_user *)"/");
+       ROOT_DEV = new_decode_dev(real_root_dev);
+       mount_root();
+       printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
+-      error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
++      error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
+       if (!error)
+               printk("okay\n");
+       else {
+-              int fd = sys_open("/dev/root.old", O_RDWR, 0);
++              int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
+               if (error == -ENOENT)
+                       printk("/initrd does not exist. Ignored.\n");
+               else
+                       printk("failed\n");
+               printk(KERN_NOTICE "Unmounting old root\n");
+-              sys_umount("/old", MNT_DETACH);
++              sys_umount((char __force_user *)"/old", MNT_DETACH);
+               printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
+               if (fd < 0) {
+                       error = fd;
+@@ -127,11 +127,11 @@ bool __init initrd_load(void)
+                * mounted in the normal path.
+                */
+               if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+-                      sys_unlink("/initrd.image");
++                      sys_unlink((const char __force_user *)"/initrd.image");
+                       handle_initrd();
+                       return true;
+               }
+       }
+-      sys_unlink("/initrd.image");
++      sys_unlink((const char __force_user *)"/initrd.image");
+       return false;
+ }
+diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
+index 8cb6db5..d729f50 100644
+--- a/init/do_mounts_md.c
++++ b/init/do_mounts_md.c
+@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
+                       partitioned ? "_d" : "", minor,
+                       md_setup_args[ent].device_names);
+-              fd = sys_open(name, 0, 0);
++              fd = sys_open((char __force_user *)name, 0, 0);
+               if (fd < 0) {
+                       printk(KERN_ERR "md: open failed - cannot start "
+                                       "array %s\n", name);
+@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
+                        * array without it
+                        */
+                       sys_close(fd);
+-                      fd = sys_open(name, 0, 0);
++                      fd = sys_open((char __force_user *)name, 0, 0);
+                       sys_ioctl(fd, BLKRRPART, 0);
+               }
+               sys_close(fd);
+@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
+       wait_for_device_probe();
+-      fd = sys_open("/dev/md0", 0, 0);
++      fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
+       if (fd >= 0) {
+               sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+               sys_close(fd);
+diff --git a/init/init_task.c b/init/init_task.c
+index ba0a7f36..a7b3aaa 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -23,4 +23,8 @@ EXPORT_SYMBOL(init_task);
+  * linker map entry.
+  */
+ union thread_union init_thread_union __init_task_data =
++#ifdef CONFIG_X86
++      { .stack[0] = ~0xabcd1234, };
++#else
+       { INIT_THREAD_INFO(init_task) };
++#endif
+diff --git a/init/initramfs.c b/init/initramfs.c
+index b32ad7d..05f6420 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
+       /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
+       while (count) {
+-              ssize_t rv = sys_write(fd, p, count);
++              ssize_t rv = sys_write(fd, (char __force_user *)p, count);
+               if (rv < 0) {
+                       if (rv == -EINTR || rv == -EAGAIN)
+@@ -107,7 +107,7 @@ static void __init free_hash(void)
+       }
+ }
+-static long __init do_utime(char *filename, time_t mtime)
++static long __init do_utime(char __force_user *filename, time_t mtime)
+ {
+       struct timespec t[2];
+@@ -142,7 +142,7 @@ static void __init dir_utime(void)
+       struct dir_entry *de, *tmp;
+       list_for_each_entry_safe(de, tmp, &dir_list, list) {
+               list_del(&de->list);
+-              do_utime(de->name, de->mtime);
++              do_utime((char __force_user *)de->name, de->mtime);
+               kfree(de->name);
+               kfree(de);
+       }
+@@ -304,7 +304,7 @@ static int __init maybe_link(void)
+       if (nlink >= 2) {
+               char *old = find_link(major, minor, ino, mode, collected);
+               if (old)
+-                      return (sys_link(old, collected) < 0) ? -1 : 1;
++                      return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
+       }
+       return 0;
+ }
+@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
+ {
+       struct stat st;
+-      if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
++      if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
+               if (S_ISDIR(st.st_mode))
+-                      sys_rmdir(path);
++                      sys_rmdir((char __force_user *)path);
+               else
+-                      sys_unlink(path);
++                      sys_unlink((char __force_user *)path);
+       }
+ }
+@@ -338,7 +338,7 @@ static int __init do_name(void)
+                       int openflags = O_WRONLY|O_CREAT;
+                       if (ml != 1)
+                               openflags |= O_TRUNC;
+-                      wfd = sys_open(collected, openflags, mode);
++                      wfd = sys_open((char __force_user *)collected, openflags, mode);
+                       if (wfd >= 0) {
+                               sys_fchown(wfd, uid, gid);
+@@ -350,17 +350,17 @@ static int __init do_name(void)
+                       }
+               }
+       } else if (S_ISDIR(mode)) {
+-              sys_mkdir(collected, mode);
+-              sys_chown(collected, uid, gid);
+-              sys_chmod(collected, mode);
++              sys_mkdir((char __force_user *)collected, mode);
++              sys_chown((char __force_user *)collected, uid, gid);
++              sys_chmod((char __force_user *)collected, mode);
+               dir_add(collected, mtime);
+       } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
+                  S_ISFIFO(mode) || S_ISSOCK(mode)) {
+               if (maybe_link() == 0) {
+-                      sys_mknod(collected, mode, rdev);
+-                      sys_chown(collected, uid, gid);
+-                      sys_chmod(collected, mode);
+-                      do_utime(collected, mtime);
++                      sys_mknod((char __force_user *)collected, mode, rdev);
++                      sys_chown((char __force_user *)collected, uid, gid);
++                      sys_chmod((char __force_user *)collected, mode);
++                      do_utime((char __force_user *)collected, mtime);
+               }
+       }
+       return 0;
+@@ -372,7 +372,7 @@ static int __init do_copy(void)
+               if (xwrite(wfd, victim, body_len) != body_len)
+                       error("write error");
+               sys_close(wfd);
+-              do_utime(vcollected, mtime);
++              do_utime((char __force_user *)vcollected, mtime);
+               kfree(vcollected);
+               eat(body_len);
+               state = SkipIt;
+@@ -390,9 +390,9 @@ static int __init do_symlink(void)
+ {
+       collected[N_ALIGN(name_len) + body_len] = '\0';
+       clean_path(collected, 0);
+-      sys_symlink(collected + N_ALIGN(name_len), collected);
+-      sys_lchown(collected, uid, gid);
+-      do_utime(collected, mtime);
++      sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
++      sys_lchown((char __force_user *)collected, uid, gid);
++      do_utime((char __force_user *)collected, mtime);
+       state = SkipIt;
+       next_state = Reset;
+       return 0;
+diff --git a/init/main.c b/init/main.c
+index a8a58e2..75fba2e 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -11,6 +11,10 @@
+ #define DEBUG         /* Enable initcall_debug */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
++
+ #include <linux/types.h>
+ #include <linux/module.h>
+ #include <linux/proc_fs.h>
+@@ -94,6 +98,8 @@ extern void init_IRQ(void);
+ extern void fork_init(void);
+ extern void radix_tree_init(void);
++extern void grsecurity_init(void);
++
+ /*
+  * Debug helper: via this flag we know that we are in 'early bootup code'
+  * where only the boot processor is running with IRQ disabled.  This means
+@@ -155,6 +161,48 @@ static int __init set_reset_devices(char *str)
+ __setup("reset_devices", set_reset_devices);
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
++static int __init setup_grsec_proc_gid(char *str)
++{
++      grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
++      return 1;
++}
++__setup("grsec_proc_gid=", setup_grsec_proc_gid);
++#endif
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++int grsec_enable_sysfs_restrict = 1;
++static int __init setup_grsec_sysfs_restrict(char *str)
++{
++      if (!simple_strtol(str, NULL, 0))
++              grsec_enable_sysfs_restrict = 0;
++      return 1;
++}
++__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++int pax_softmode;
++
++static int __init setup_pax_softmode(char *str)
++{
++      get_option(&str, &pax_softmode);
++      return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
++#ifdef CONFIG_PAX_SIZE_OVERFLOW
++bool pax_size_overflow_report_only __read_only;
++
++static int __init setup_pax_size_overflow_report_only(char *str)
++{
++      pax_size_overflow_report_only = true;
++      return 0;
++}
++early_param("pax_size_overflow_report_only", setup_pax_size_overflow_report_only);
++#endif
++
+ static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+ const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+ static const char *panic_later, *panic_param;
+@@ -767,7 +815,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
+ {
+       int count = preempt_count();
+       int ret;
+-      char msgbuf[64];
++      const char *msg1 = "", *msg2 = "";
+       if (initcall_blacklisted(fn))
+               return -EPERM;
+@@ -777,18 +825,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
+       else
+               ret = fn();
+-      msgbuf[0] = 0;
+-
+       if (preempt_count() != count) {
+-              sprintf(msgbuf, "preemption imbalance ");
++              msg1 = " preemption imbalance";
+               preempt_count_set(count);
+       }
+       if (irqs_disabled()) {
+-              strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++              msg2 = " disabled interrupts";
+               local_irq_enable();
+       }
+-      WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
++      WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
++      add_latent_entropy();
+       return ret;
+ }
+@@ -893,8 +940,8 @@ static int run_init_process(const char *init_filename)
+ {
+       argv_init[0] = init_filename;
+       return do_execve(getname_kernel(init_filename),
+-              (const char __user *const __user *)argv_init,
+-              (const char __user *const __user *)envp_init);
++              (const char __user *const __force_user *)argv_init,
++              (const char __user *const __force_user *)envp_init);
+ }
+ static int try_to_run_init_process(const char *init_filename)
+@@ -911,6 +958,10 @@ static int try_to_run_init_process(const char *init_filename)
+       return ret;
+ }
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++extern int gr_init_ran;
++#endif
++
+ static noinline void __init kernel_init_freeable(void);
+ #ifdef CONFIG_DEBUG_RODATA
+@@ -959,6 +1010,11 @@ static int __ref kernel_init(void *unused)
+                      ramdisk_execute_command, ret);
+       }
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++      /* if no initrd was used, be extra sure we enforce chroot restrictions */
++      gr_init_ran = 1;
++#endif
++
+       /*
+        * We try each of these until one succeeds.
+        *
+@@ -1016,7 +1072,7 @@ static noinline void __init kernel_init_freeable(void)
+       do_basic_setup();
+       /* Open the /dev/console on the rootfs, this should never fail */
+-      if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
++      if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
+               pr_err("Warning: unable to open an initial console.\n");
+       (void) sys_dup(0);
+@@ -1029,11 +1085,13 @@ static noinline void __init kernel_init_freeable(void)
+       if (!ramdisk_execute_command)
+               ramdisk_execute_command = "/init";
+-      if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++      if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
+               ramdisk_execute_command = NULL;
+               prepare_namespace();
+       }
++      grsecurity_init();
++
+       /*
+        * Ok, we have completed the initial bootup, and
+        * we're essentially up and running. Get rid of the
+diff --git a/ipc/compat.c b/ipc/compat.c
+index 9b3c85f..5266b0f 100644
+--- a/ipc/compat.c
++++ b/ipc/compat.c
+@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
+                              COMPAT_SHMLBA);
+               if (err < 0)
+                       return err;
+-              return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
++              return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
+       }
+       case SHMDT:
+               return sys_shmdt(compat_ptr(ptr));
+@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
+ }
+ COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+-                     unsigned, nsops,
++                     compat_long_t, nsops,
+                      const struct compat_timespec __user *, timeout)
+ {
+       struct timespec __user *ts64;
+diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
+index 8ad93c2..54036e1 100644
+--- a/ipc/ipc_sysctl.c
++++ b/ipc/ipc_sysctl.c
+@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
+ static int proc_ipc_dointvec(struct ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
+ static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
+ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
+ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table ipc_table;
++      ctl_table_no_const ipc_table;
+       int dummy = 0;
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+@@ -99,6 +99,8 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
+ static int zero;
+ static int one = 1;
+ static int int_max = INT_MAX;
++static unsigned long long_zero = 0;
++static unsigned long long_max = LONG_MAX;
+ static struct ctl_table ipc_kern_table[] = {
+       {
+@@ -107,6 +109,8 @@ static struct ctl_table ipc_kern_table[] = {
+               .maxlen         = sizeof(init_ipc_ns.shm_ctlmax),
+               .mode           = 0644,
+               .proc_handler   = proc_ipc_doulongvec_minmax,
++              .extra1         = &long_zero,
++              .extra2         = &long_max,
+       },
+       {
+               .procname       = "shmall",
+@@ -114,6 +118,8 @@ static struct ctl_table ipc_kern_table[] = {
+               .maxlen         = sizeof(init_ipc_ns.shm_ctlall),
+               .mode           = 0644,
+               .proc_handler   = proc_ipc_doulongvec_minmax,
++              .extra1         = &long_zero,
++              .extra2         = &long_max,
+       },
+       {
+               .procname       = "shmmni",
+diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
+index 68d4e95..1477ded 100644
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
+ static int proc_mq_dointvec(struct ctl_table *table, int write,
+                           void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table mq_table;
++      ctl_table_no_const mq_table;
+       memcpy(&mq_table, table, sizeof(mq_table));
+       mq_table.data = get_mq(table);
+@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
+ static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
+       void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table mq_table;
++      ctl_table_no_const mq_table;
+       memcpy(&mq_table, table, sizeof(mq_table));
+       mq_table.data = get_mq(table);
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 0b13ace..2b586ea 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -274,6 +274,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
+               mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                         info->attr.mq_msgsize);
++              gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
+               spin_lock(&mq_lock);
+               if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+                   u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
+diff --git a/ipc/msg.c b/ipc/msg.c
+index c6521c2..4e2379d 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -1041,7 +1041,8 @@ void msg_exit_ns(struct ipc_namespace *ns)
+ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
+ {
+       struct user_namespace *user_ns = seq_user_ns(s);
+-      struct msg_queue *msq = it;
++      struct kern_ipc_perm *perm = it;
++      struct msg_queue *msq = container_of(perm, struct msg_queue, q_perm);
+       seq_printf(s,
+                  "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
+diff --git a/ipc/msgutil.c b/ipc/msgutil.c
+index a521999..6259e10 100644
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -53,7 +53,7 @@ static struct msg_msg *alloc_msg(size_t len)
+       size_t alen;
+       alen = min(len, DATALEN_MSG);
+-      msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
++      msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL|GFP_USERCOPY);
+       if (msg == NULL)
+               return NULL;
+@@ -65,7 +65,7 @@ static struct msg_msg *alloc_msg(size_t len)
+       while (len > 0) {
+               struct msg_msgseg *seg;
+               alen = min(len, DATALEN_SEG);
+-              seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
++              seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL|GFP_USERCOPY);
+               if (seg == NULL)
+                       goto out_err;
+               *pseg = seg;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 5e318c5..235b6b0 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -1814,7 +1814,7 @@ static int get_queue_result(struct sem_queue *q)
+ }
+ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+-              unsigned, nsops, const struct timespec __user *, timeout)
++              long, nsops, const struct timespec __user *, timeout)
+ {
+       int error = -EINVAL;
+       struct sem_array *sma;
+@@ -2049,7 +2049,7 @@ out_free:
+ }
+ SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+-              unsigned, nsops)
++              long, nsops)
+ {
+       return sys_semtimedop(semid, tsops, nsops, NULL);
+ }
+@@ -2204,7 +2204,8 @@ void exit_sem(struct task_struct *tsk)
+ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ {
+       struct user_namespace *user_ns = seq_user_ns(s);
+-      struct sem_array *sma = it;
++      struct kern_ipc_perm *perm = it;
++      struct sem_array *sma = container_of(perm, struct sem_array, sem_perm);
+       time_t sem_otime;
+       /*
+diff --git a/ipc/shm.c b/ipc/shm.c
+index dbac886..ef5e42d 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -72,9 +72,17 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
+ static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
+ #endif
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++                         const u64 shm_createtime, const kuid_t cuid,
++                         const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++                         const u64 shm_createtime);
++#endif
++
+ void shm_init_ns(struct ipc_namespace *ns)
+ {
+-      ns->shm_ctlmax = SHMMAX;
++      ns->shm_ctlmax = BITS_PER_LONG == 32 ? SHMMAX : LONG_MAX;
+       ns->shm_ctlall = SHMALL;
+       ns->shm_ctlmni = SHMMNI;
+       ns->shm_rmid_forced = 0;
+@@ -590,6 +598,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+       shp->shm_lprid = 0;
+       shp->shm_atim = shp->shm_dtim = 0;
+       shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++      shp->shm_createtime = ktime_get_ns();
++#endif
+       shp->shm_segsz = size;
+       shp->shm_nattch = 0;
+       shp->shm_file = file;
+@@ -1133,6 +1144,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+               f_mode = FMODE_READ | FMODE_WRITE;
+       }
+       if (shmflg & SHM_EXEC) {
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (current->mm->pax_flags & MF_PAX_MPROTECT)
++                      goto out;
++#endif
++
+               prot |= PROT_EXEC;
+               acc_mode |= S_IXUGO;
+       }
+@@ -1157,6 +1174,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+       if (err)
+               goto out_unlock;
++#ifdef CONFIG_GRKERNSEC
++      if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++                           shp->shm_perm.cuid, shmid) ||
++          !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++              err = -EACCES;
++              goto out_unlock;
++      }
++#endif
++
+       ipc_lock_object(&shp->shm_perm);
+       /* check if shm_destroy() is tearing down shp */
+@@ -1169,6 +1195,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+       path = shp->shm_file->f_path;
+       path_get(&path);
+       shp->shm_nattch++;
++#ifdef CONFIG_GRKERNSEC
++      shp->shm_lapid = current->pid;
++#endif
+       size = i_size_read(d_inode(path.dentry));
+       ipc_unlock_object(&shp->shm_perm);
+       rcu_read_unlock();
+@@ -1372,7 +1401,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
+ {
+       struct user_namespace *user_ns = seq_user_ns(s);
+-      struct shmid_kernel *shp = it;
++      struct kern_ipc_perm *perm = it;
++      struct shmid_kernel *shp = container_of(perm, struct shmid_kernel, shm_perm);
+       unsigned long rss = 0, swp = 0;
+       shm_add_rss_swap(shp, &rss, &swp);
+diff --git a/ipc/util.c b/ipc/util.c
+index 798cad1..d6ffc17 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -71,6 +71,8 @@ struct ipc_proc_iface {
+       int (*show)(struct seq_file *, void *);
+ };
++extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
++
+ /**
+  * ipc_init - initialise ipc subsystem
+  *
+@@ -489,6 +491,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
+               granted_mode >>= 6;
+       else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
+               granted_mode >>= 3;
++
++      if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
++              return -1;
++
+       /* is there some bit set in requested_mode but not in granted_mode? */
+       if ((requested_mode & ~granted_mode & 0007) &&
+           !ns_capable(ns->user_ns, CAP_IPC_OWNER))
+diff --git a/kernel/audit.c b/kernel/audit.c
+index a8a91bd2..b8f3933 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -122,7 +122,7 @@ u32                audit_sig_sid = 0;
+    3) suppressed due to audit_rate_limit
+    4) suppressed due to audit_backlog_limit
+ */
+-static atomic_t    audit_lost = ATOMIC_INIT(0);
++static atomic_unchecked_t    audit_lost = ATOMIC_INIT(0);
+ /* The netlink socket. */
+ static struct sock *audit_sock;
+@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
+       unsigned long           now;
+       int                     print;
+-      atomic_inc(&audit_lost);
++      atomic_inc_unchecked(&audit_lost);
+       print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
+@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
+       if (print) {
+               if (printk_ratelimit())
+                       pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
+-                              atomic_read(&audit_lost),
++                              atomic_read_unchecked(&audit_lost),
+                               audit_rate_limit,
+                               audit_backlog_limit);
+               audit_panic(message);
+@@ -854,7 +854,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+               s.pid                   = audit_pid;
+               s.rate_limit            = audit_rate_limit;
+               s.backlog_limit         = audit_backlog_limit;
+-              s.lost                  = atomic_read(&audit_lost);
++              s.lost                  = atomic_read_unchecked(&audit_lost);
+               s.backlog               = skb_queue_len(&audit_skb_queue);
+               s.feature_bitmap        = AUDIT_FEATURE_BITMAP_ALL;
+               s.backlog_wait_time     = audit_backlog_wait_time_master;
+@@ -1171,7 +1171,7 @@ static void __net_exit audit_net_exit(struct net *net)
+       netlink_kernel_release(sock);
+ }
+-static struct pernet_operations audit_net_ops __net_initdata = {
++static struct pernet_operations audit_net_ops __net_initconst = {
+       .init = audit_net_init,
+       .exit = audit_net_exit,
+       .id = &audit_net_id,
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 5abf1dc..78861f76 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -1954,7 +1954,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
+ }
+ /* global counter which is incremented every time something logs in */
+-static atomic_t session_id = ATOMIC_INIT(0);
++static atomic_unchecked_t session_id = ATOMIC_INIT(0);
+ static int audit_set_loginuid_perm(kuid_t loginuid)
+ {
+@@ -2026,7 +2026,7 @@ int audit_set_loginuid(kuid_t loginuid)
+       /* are we setting or clearing? */
+       if (uid_valid(loginuid))
+-              sessionid = (unsigned int)atomic_inc_return(&session_id);
++              sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
+       task->sessionid = sessionid;
+       task->loginuid = loginuid;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 03fd23d..4b2832f0 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -208,6 +208,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+ }
+ #ifdef CONFIG_BPF_JIT
++extern long __rap_hash___bpf_prog_run;
++
+ struct bpf_binary_header *
+ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
+                    unsigned int alignment,
+@@ -221,27 +223,45 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
+        * random section of illegal instructions.
+        */
+       size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+-      hdr = module_alloc(size);
++      hdr = module_alloc_exec(size);
+       if (hdr == NULL)
+               return NULL;
+       /* Fill space with illegal/arch-dep instructions. */
+       bpf_fill_ill_insns(hdr, size);
++      pax_open_kernel();
+       hdr->pages = size / PAGE_SIZE;
++      pax_close_kernel();
++
+       hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
+                    PAGE_SIZE - sizeof(*hdr));
++
++#ifdef CONFIG_PAX_RAP
++      hole -= 8;
++#endif
++
+       start = (get_random_int() % hole) & ~(alignment - 1);
++#ifdef CONFIG_PAX_RAP
++      start += 8;
++#endif
++
+       /* Leave a random number of instructions before BPF code. */
+       *image_ptr = &hdr->image[start];
++#ifdef CONFIG_PAX_RAP
++      pax_open_kernel();
++      *(long *)(*image_ptr - 8) = (long)&__rap_hash___bpf_prog_run;
++      pax_close_kernel();
++#endif
++
+       return hdr;
+ }
+ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
+ {
+-      module_memfree(hdr);
++      module_memfree_exec(hdr);
+ }
+ int bpf_jit_harden __read_mostly;
+@@ -465,7 +485,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
+  *
+  * Decode and execute eBPF instructions.
+  */
+-static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
++unsigned int __bpf_prog_run(const struct sk_buff *ctx, const struct bpf_insn *insn)
+ {
+       u64 stack[MAX_BPF_STACK / sizeof(u64)];
+       u64 regs[MAX_BPF_REG], tmp;
+@@ -970,7 +990,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
+  */
+ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ {
+-      fp->bpf_func = (void *) __bpf_prog_run;
++      fp->bpf_func = __bpf_prog_run;
+       /* eBPF JITs can rewrite the program in case constant
+        * blinding is active. However, in case of error during
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 228f962..ebef033 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -827,8 +827,16 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
+       union bpf_attr attr = {};
+       int err;
+-      if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
++      /* the syscall is limited to root temporarily. This restriction will be
++       * lifted by upstream when a half-assed security audit is clean. Note
++       * that eBPF+tracing must have this restriction, since it may pass
++       * kernel data to user space
++       */
++      if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
++#ifdef CONFIG_GRKERNSEC
++      return -EPERM;
++#endif
+       if (!access_ok(VERIFY_READ, uattr, 1))
+               return -EFAULT;
+diff --git a/kernel/capability.c b/kernel/capability.c
+index 00411c8..aaad585 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -193,6 +193,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
+                * before modification is attempted and the application
+                * fails.
+                */
++              if (tocopy > ARRAY_SIZE(kdata))
++                      return -EFAULT;
++
+               if (copy_to_user(dataptr, kdata, tocopy
+                                * sizeof(struct __user_cap_data_struct))) {
+                       return -EFAULT;
+@@ -298,10 +301,11 @@ bool has_ns_capability(struct task_struct *t,
+       int ret;
+       rcu_read_lock();
+-      ret = security_capable(__task_cred(t), ns, cap);
++      ret = security_capable(__task_cred(t), ns, cap) == 0 &&
++              gr_task_is_capable(t, __task_cred(t), cap);
+       rcu_read_unlock();
+-      return (ret == 0);
++      return ret;
+ }
+ /**
+@@ -338,10 +342,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
+       int ret;
+       rcu_read_lock();
+-      ret = security_capable_noaudit(__task_cred(t), ns, cap);
++      ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, __task_cred(t), cap);
+       rcu_read_unlock();
+-      return (ret == 0);
++      return ret;
+ }
+ /**
+@@ -370,9 +374,9 @@ static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
+               BUG();
+       }
+-      capable = audit ? security_capable(current_cred(), ns, cap) :
+-                        security_capable_noaudit(current_cred(), ns, cap);
+-      if (capable == 0) {
++      capable = audit ? (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) :
++                        (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) ;
++      if (capable) {
+               current->flags |= PF_SUPERPRIV;
+               return true;
+       }
+@@ -429,6 +433,13 @@ bool capable(int cap)
+       return ns_capable(&init_user_ns, cap);
+ }
+ EXPORT_SYMBOL(capable);
++
++bool capable_nolog(int cap)
++{
++      return ns_capable_noaudit(&init_user_ns, cap);
++}
++EXPORT_SYMBOL(capable_nolog);
++
+ #endif /* CONFIG_MULTIUSER */
+ /**
+@@ -473,3 +484,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
+               kgid_has_mapping(ns, inode->i_gid);
+ }
+ EXPORT_SYMBOL(capable_wrt_inode_uidgid);
++
++bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
++{
++      struct user_namespace *ns = current_user_ns();
++
++      return ns_capable_noaudit(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
++              kgid_has_mapping(ns, inode->i_gid);
++}
++EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index d6b729b..f78716c 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -3645,7 +3645,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
+       key = &cft->lockdep_key;
+ #endif
+       kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
+-                                cgroup_file_mode(cft), 0, cft->kf_ops, cft,
++                                cgroup_file_mode(cft), 0, cft->kf_ops, (void *)cft,
+                                 NULL, key);
+       if (IS_ERR(kn))
+               return PTR_ERR(kn);
+@@ -3749,11 +3749,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
+               /* free copy for custom atomic_write_len, see init_cftypes() */
+               if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
+                       kfree(cft->kf_ops);
+-              cft->kf_ops = NULL;
+-              cft->ss = NULL;
++
++              pax_open_kernel();
++              const_cast(cft->kf_ops) = NULL;
++              const_cast(cft->ss) = NULL;
+               /* revert flags set by cgroup core while adding @cfts */
+-              cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
++              const_cast(cft->flags) &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
++              pax_close_kernel();
+       }
+ }
+@@ -3784,8 +3787,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+                       kf_ops->atomic_write_len = cft->max_write_len;
+               }
+-              cft->kf_ops = kf_ops;
+-              cft->ss = ss;
++              pax_open_kernel();
++              const_cast(cft->kf_ops) = kf_ops;
++              const_cast(cft->ss) = ss;
++              pax_close_kernel();
+       }
+       return 0;
+@@ -3798,7 +3803,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
+       if (!cfts || !cfts[0].ss)
+               return -ENOENT;
+-      list_del(&cfts->node);
++      pax_list_del((struct list_head *)&cfts->node);
+       cgroup_apply_cftypes(cfts, false);
+       cgroup_exit_cftypes(cfts);
+       return 0;
+@@ -3855,7 +3860,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+       mutex_lock(&cgroup_mutex);
+-      list_add_tail(&cfts->node, &ss->cfts);
++      pax_list_add_tail((struct list_head *)&cfts->node, &ss->cfts);
+       ret = cgroup_apply_cftypes(cfts, true);
+       if (ret)
+               cgroup_rm_cftypes_locked(cfts);
+@@ -3876,8 +3881,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+ {
+       struct cftype *cft;
++      pax_open_kernel();
+       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+-              cft->flags |= __CFTYPE_ONLY_ON_DFL;
++              const_cast(cft->flags) |= __CFTYPE_ONLY_ON_DFL;
++      pax_close_kernel();
+       return cgroup_add_cftypes(ss, cfts);
+ }
+@@ -3893,8 +3900,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+ {
+       struct cftype *cft;
++      pax_open_kernel();
+       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+-              cft->flags |= __CFTYPE_NOT_ON_DFL;
++              const_cast(cft->flags) |= __CFTYPE_NOT_ON_DFL;
++      pax_close_kernel();
+       return cgroup_add_cftypes(ss, cfts);
+ }
+@@ -6066,6 +6075,9 @@ static void cgroup_release_agent(struct work_struct *work)
+       if (!pathbuf || !agentbuf)
+               goto out;
++      if (agentbuf[0] == '\0')
++              goto out;
++
+       spin_lock_irq(&css_set_lock);
+       path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+       spin_unlock_irq(&css_set_lock);
+@@ -6522,7 +6534,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+               struct task_struct *task;
+               int count = 0;
+-              seq_printf(seq, "css_set %p\n", cset);
++              seq_printf(seq, "css_set %pK\n", cset);
+               list_for_each_entry(task, &cset->tasks, cg_list) {
+                       if (count++ > MAX_TASKS_SHOWN_PER_CSS)
+diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
+index 2bd6737..9b0ddd4 100644
+--- a/kernel/cgroup_pids.c
++++ b/kernel/cgroup_pids.c
+@@ -54,7 +54,7 @@ struct pids_cgroup {
+       struct cgroup_file              events_file;
+       /* Number of times fork failed because limit was hit. */
+-      atomic64_t                      events_limit;
++      atomic64_unchecked_t            events_limit;
+ };
+ static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
+@@ -78,7 +78,7 @@ pids_css_alloc(struct cgroup_subsys_state *parent)
+       pids->limit = PIDS_MAX;
+       atomic64_set(&pids->counter, 0);
+-      atomic64_set(&pids->events_limit, 0);
++      atomic64_set_unchecked(&pids->events_limit, 0);
+       return &pids->css;
+ }
+@@ -227,7 +227,7 @@ static int pids_can_fork(struct task_struct *task)
+       err = pids_try_charge(pids, 1);
+       if (err) {
+               /* Only log the first time events_limit is incremented. */
+-              if (atomic64_inc_return(&pids->events_limit) == 1) {
++              if (atomic64_inc_return_unchecked(&pids->events_limit) == 1) {
+                       pr_info("cgroup: fork rejected by pids controller in ");
+                       pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+                       pr_cont("\n");
+@@ -310,7 +310,7 @@ static int pids_events_show(struct seq_file *sf, void *v)
+ {
+       struct pids_cgroup *pids = css_pids(seq_css(sf));
+-      seq_printf(sf, "max %lld\n", (s64)atomic64_read(&pids->events_limit));
++      seq_printf(sf, "max %lld\n", (s64)atomic64_read_unchecked(&pids->events_limit));
+       return 0;
+ }
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 333d364..762ec00 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -13,6 +13,7 @@
+ #include <linux/linkage.h>
+ #include <linux/compat.h>
++#include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/time.h>
+ #include <linux/signal.h>
+@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
+       mm_segment_t oldfs;
+       long ret;
+-      restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
++      restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       ret = hrtimer_nanosleep_restart(restart);
+@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       ret = hrtimer_nanosleep(&tu,
+-                              rmtp ? (struct timespec __user *)&rmt : NULL,
++                              rmtp ? (struct timespec __force_user *)&rmt : NULL,
+                               HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+       set_fs(oldfs);
+@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_sigpending((old_sigset_t __user *) &s);
++      ret = sys_sigpending((old_sigset_t __force_user *) &s);
+       set_fs(old_fs);
+       if (ret == 0)
+               ret = put_user(s, set);
+@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
++      ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
+       set_fs(old_fs);
+       if (!ret) {
+@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
+               set_fs (KERNEL_DS);
+               ret = sys_wait4(pid,
+                               (stat_addr ?
+-                               (unsigned int __user *) &status : NULL),
+-                              options, (struct rusage __user *) &r);
++                               (unsigned int __force_user *) &status : NULL),
++                              options, (struct rusage __force_user *) &r);
+               set_fs (old_fs);
+               if (ret > 0) {
+@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
+       memset(&info, 0, sizeof(info));
+       set_fs(KERNEL_DS);
+-      ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
+-                       uru ? (struct rusage __user *)&ru : NULL);
++      ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
++                       uru ? (struct rusage __force_user *)&ru : NULL);
+       set_fs(old_fs);
+       if ((ret < 0) || (info.si_signo == 0))
+@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_timer_settime(timer_id, flags,
+-                              (struct itimerspec __user *) &newts,
+-                              (struct itimerspec __user *) &oldts);
++                              (struct itimerspec __force_user *) &newts,
++                              (struct itimerspec __force_user *) &oldts);
+       set_fs(oldfs);
+       if (!err && old && put_compat_itimerspec(old, &oldts))
+               return -EFAULT;
+@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_timer_gettime(timer_id,
+-                              (struct itimerspec __user *) &ts);
++                              (struct itimerspec __force_user *) &ts);
+       set_fs(oldfs);
+       if (!err && put_compat_itimerspec(setting, &ts))
+               return -EFAULT;
+@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_settime(which_clock,
+-                              (struct timespec __user *) &ts);
++                              (struct timespec __force_user *) &ts);
+       set_fs(oldfs);
+       return err;
+ }
+@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_gettime(which_clock,
+-                              (struct timespec __user *) &ts);
++                              (struct timespec __force_user *) &ts);
+       set_fs(oldfs);
+       if (!err && compat_put_timespec(&ts, tp))
+               return -EFAULT;
+@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
++      ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
+       set_fs(oldfs);
+       err = compat_put_timex(utp, &txc);
+@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_getres(which_clock,
+-                             (struct timespec __user *) &ts);
++                             (struct timespec __force_user *) &ts);
+       set_fs(oldfs);
+       if (!err && tp && compat_put_timespec(&ts, tp))
+               return -EFAULT;
+@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
+       struct timespec tu;
+       struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
+-      restart->nanosleep.rmtp = (struct timespec __user *) &tu;
++      restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = clock_nanosleep_restart(restart);
+@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sys_clock_nanosleep(which_clock, flags,
+-                                (struct timespec __user *) &in,
+-                                (struct timespec __user *) &out);
++                                (struct timespec __force_user *) &in,
++                                (struct timespec __force_user *) &out);
+       set_fs(oldfs);
+       if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
+@@ -1147,7 +1148,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
++      ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
+       set_fs(old_fs);
+       if (compat_put_timespec(&t, interval))
+               return -EFAULT;
+diff --git a/kernel/configs.c b/kernel/configs.c
+index c18b1f1..b9a0132 100644
+--- a/kernel/configs.c
++++ b/kernel/configs.c
+@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
+       struct proc_dir_entry *entry;
+       /* create the current config file */
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
++      entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
++                          &ikconfig_file_ops);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
++                          &ikconfig_file_ops);
++#endif
++#else
+       entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+                           &ikconfig_file_ops);
++#endif
++
+       if (!entry)
+               return -ENOMEM;
+diff --git a/kernel/cred.c b/kernel/cred.c
+index 5f264fb..8fc856b 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -172,6 +172,15 @@ void exit_creds(struct task_struct *tsk)
+       validate_creds(cred);
+       alter_cred_subscribers(cred, -1);
+       put_cred(cred);
++
++#ifdef CONFIG_GRKERNSEC_SETXID
++      cred = (struct cred *) tsk->delayed_cred;
++      if (cred != NULL) {
++              tsk->delayed_cred = NULL;
++              validate_creds(cred);
++              put_cred(cred);
++      }
++#endif
+ }
+ /**
+@@ -419,7 +428,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
+  * Always returns 0 thus allowing this function to be tail-called at the end
+  * of, say, sys_setgid().
+  */
+-int commit_creds(struct cred *new)
++static int __commit_creds(struct cred *new)
+ {
+       struct task_struct *task = current;
+       const struct cred *old = task->real_cred;
+@@ -438,6 +447,8 @@ int commit_creds(struct cred *new)
+       get_cred(new); /* we will require a ref for the subj creds too */
++      gr_set_role_label(task, new->uid, new->gid);
++
+       /* dumpability changes */
+       if (!uid_eq(old->euid, new->euid) ||
+           !gid_eq(old->egid, new->egid) ||
+@@ -487,6 +498,105 @@ int commit_creds(struct cred *new)
+       put_cred(old);
+       return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern int set_user(struct cred *new);
++
++void gr_delayed_cred_worker(void)
++{
++      const struct cred *new = current->delayed_cred;
++      struct cred *ncred;
++
++      current->delayed_cred = NULL;
++
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
++              // from doing get_cred on it when queueing this
++              put_cred(new);
++              return;
++      } else if (new == NULL)
++              return;
++
++      ncred = prepare_creds();
++      if (!ncred)
++              goto die;
++      // uids
++      ncred->uid = new->uid;
++      ncred->euid = new->euid;
++      ncred->suid = new->suid;
++      ncred->fsuid = new->fsuid;
++      // gids
++      ncred->gid = new->gid;
++      ncred->egid = new->egid;
++      ncred->sgid = new->sgid;
++      ncred->fsgid = new->fsgid;
++      // groups
++      set_groups(ncred, new->group_info);
++      // caps
++      ncred->securebits = new->securebits;
++      ncred->cap_inheritable = new->cap_inheritable;
++      ncred->cap_permitted = new->cap_permitted;
++      ncred->cap_effective = new->cap_effective;
++      ncred->cap_bset = new->cap_bset;
++
++      if (set_user(ncred)) {
++              abort_creds(ncred);
++              goto die;
++      }
++
++      // from doing get_cred on it when queueing this
++      put_cred(new);
++
++      __commit_creds(ncred);
++      return;
++die:
++      // from doing get_cred on it when queueing this
++      put_cred(new);
++      do_group_exit(SIGKILL);
++}
++#endif
++
++int commit_creds(struct cred *new)
++{
++#ifdef CONFIG_GRKERNSEC_SETXID
++      int ret;
++      int schedule_it = 0;
++      struct task_struct *t;
++      unsigned oldsecurebits = current_cred()->securebits;
++
++      /* we won't get called with tasklist_lock held for writing
++         and interrupts disabled as the cred struct in that case is
++         init_cred
++      */
++      if (grsec_enable_setxid && !current_is_single_threaded() &&
++          uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
++          !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
++              schedule_it = 1;
++      }
++      ret = __commit_creds(new);
++      if (schedule_it) {
++              rcu_read_lock();
++              read_lock(&tasklist_lock);
++              for (t = next_thread(current); t != current;
++                   t = next_thread(t)) {
++                      /* we'll check if the thread has uid 0 in
++                       * the delayed worker routine
++                       */
++                      if (task_securebits(t) == oldsecurebits &&
++                          t->delayed_cred == NULL) {
++                              t->delayed_cred = get_cred(new);
++                              set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
++                              set_tsk_need_resched(t);
++                      }
++              }
++              read_unlock(&tasklist_lock);
++              rcu_read_unlock();
++      }
++
++      return ret;
++#else
++      return __commit_creds(new);
++#endif
++}
++
+ EXPORT_SYMBOL(commit_creds);
+ /**
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 0874e2e..5b32cc9 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+  */
+ static atomic_t                       masters_in_kgdb;
+ static atomic_t                       slaves_in_kgdb;
+-static atomic_t                       kgdb_break_tasklet_var;
++static atomic_unchecked_t     kgdb_break_tasklet_var;
+ atomic_t                      kgdb_setting_breakpoint;
+ struct task_struct            *kgdb_usethread;
+@@ -137,7 +137,7 @@ int                                kgdb_single_step;
+ static pid_t                  kgdb_sstep_pid;
+ /* to keep track of the CPU which is doing the single stepping*/
+-atomic_t                      kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
++atomic_unchecked_t            kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+ /*
+  * If you are debugging a problem where roundup (the collection of
+@@ -552,7 +552,7 @@ return_normal:
+        * kernel will only try for the value of sstep_tries before
+        * giving up and continuing on.
+        */
+-      if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
++      if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
+           (kgdb_info[cpu].task &&
+            kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+               atomic_set(&kgdb_active, -1);
+@@ -654,8 +654,8 @@ cpu_master_loop:
+       }
+ kgdb_restore:
+-      if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+-              int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
++      if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
++              int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
+               if (kgdb_info[sstep_cpu].task)
+                       kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+               else
+@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
+ static void kgdb_tasklet_bpt(unsigned long ing)
+ {
+       kgdb_breakpoint();
+-      atomic_set(&kgdb_break_tasklet_var, 0);
++      atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
+ }
+ static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+ void kgdb_schedule_breakpoint(void)
+ {
+-      if (atomic_read(&kgdb_break_tasklet_var) ||
++      if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
+               atomic_read(&kgdb_active) != -1 ||
+               atomic_read(&kgdb_setting_breakpoint))
+               return;
+-      atomic_inc(&kgdb_break_tasklet_var);
++      atomic_inc_unchecked(&kgdb_break_tasklet_var);
+       tasklet_schedule(&kgdb_tasklet_breakpoint);
+ }
+ EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 2a20c0d..3eb7d03 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
+                       continue;
+               kdb_printf("%-20s%8u  0x%p ", mod->name,
+-                         mod->core_layout.size, (void *)mod);
++                         mod->core_layout.size_rx + mod->core_layout.size_rw, (void *)mod);
+ #ifdef CONFIG_MODULE_UNLOAD
+               kdb_printf("%4d ", module_refcount(mod));
+ #endif
+@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
+                       kdb_printf(" (Loading)");
+               else
+                       kdb_printf(" (Live)");
+-              kdb_printf(" 0x%p", mod->core_layout.base);
++              kdb_printf(" 0x%p 0x%p", mod->core_layout.base_rx, mod->core_layout.base_rw);
+ #ifdef CONFIG_MODULE_UNLOAD
+               {
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index e9fdb52..cfb547d 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -251,7 +251,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
+ {
+       int *value = table->data;
+       int new_value = *value, ret;
+-      struct ctl_table new_table = *table;
++      ctl_table_no_const new_table = *table;
+       new_table.data = &new_value;
+       ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index fc9bb22..bedc98b 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -389,8 +389,15 @@ static struct srcu_struct pmus_srcu;
+  *   0 - disallow raw tracepoint access for unpriv
+  *   1 - disallow cpu events for unpriv
+  *   2 - disallow kernel profiling for unpriv
++ *   3 - disallow all unpriv perf event use
+  */
+-int sysctl_perf_event_paranoid __read_mostly = 2;
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++int sysctl_perf_event_legitimately_concerned __read_only = 3;
++#elif defined(CONFIG_GRKERNSEC_HIDESYM)
++int sysctl_perf_event_legitimately_concerned __read_only = 2;
++#else
++int sysctl_perf_event_legitimately_concerned __read_only = 2;
++#endif
+ /* Minimum for 512 kiB + 1 user control page */
+ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
+@@ -545,7 +552,7 @@ void perf_sample_event_took(u64 sample_len_ns)
+       }
+ }
+-static atomic64_t perf_event_id;
++static atomic64_unchecked_t perf_event_id;
+ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type);
+@@ -1044,8 +1051,9 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
+       timer->function = perf_mux_hrtimer_handler;
+ }
+-static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
++static int perf_mux_hrtimer_restart(void *_cpuctx)
+ {
++      struct perf_cpu_context *cpuctx = _cpuctx;
+       struct hrtimer *timer = &cpuctx->hrtimer;
+       struct pmu *pmu = cpuctx->ctx.pmu;
+       unsigned long flags;
+@@ -3111,7 +3119,7 @@ void __perf_event_task_sched_in(struct task_struct *prev,
+               perf_pmu_sched_task(prev, task, true);
+ }
+-static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
++static u64 perf_calculate_period(const struct perf_event *event, u64 nsec, u64 count)
+ {
+       u64 frequency = event->attr.sample_freq;
+       u64 sec = NSEC_PER_SEC;
+@@ -4201,9 +4209,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+       total += perf_event_count(event);
+       *enabled += event->total_time_enabled +
+-                      atomic64_read(&event->child_total_time_enabled);
++                      atomic64_read_unchecked(&event->child_total_time_enabled);
+       *running += event->total_time_running +
+-                      atomic64_read(&event->child_total_time_running);
++                      atomic64_read_unchecked(&event->child_total_time_running);
+       list_for_each_entry(child, &event->child_list, child_list) {
+               (void)perf_event_read(child, false);
+@@ -4235,12 +4243,12 @@ static int __perf_read_group_add(struct perf_event *leader,
+        */
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+               values[n++] += leader->total_time_enabled +
+-                      atomic64_read(&leader->child_total_time_enabled);
++                      atomic64_read_unchecked(&leader->child_total_time_enabled);
+       }
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+               values[n++] += leader->total_time_running +
+-                      atomic64_read(&leader->child_total_time_running);
++                      atomic64_read_unchecked(&leader->child_total_time_running);
+       }
+       /*
+@@ -4763,10 +4771,10 @@ void perf_event_update_userpage(struct perf_event *event)
+               userpg->offset -= local64_read(&event->hw.prev_count);
+       userpg->time_enabled = enabled +
+-                      atomic64_read(&event->child_total_time_enabled);
++                      atomic64_read_unchecked(&event->child_total_time_enabled);
+       userpg->time_running = running +
+-                      atomic64_read(&event->child_total_time_running);
++                      atomic64_read_unchecked(&event->child_total_time_running);
+       arch_perf_update_userpage(event, userpg, now);
+@@ -5468,7 +5476,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+               /* Data. */
+               sp = perf_user_stack_pointer(regs);
+-              rem = __output_copy_user(handle, (void *) sp, dump_size);
++              rem = __output_copy_user(handle, (void __user *) sp, dump_size);
+               dyn_size = dump_size - rem;
+               perf_output_skip(handle, rem);
+@@ -5559,11 +5567,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+       values[n++] = perf_event_count(event);
+       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+               values[n++] = enabled +
+-                      atomic64_read(&event->child_total_time_enabled);
++                      atomic64_read_unchecked(&event->child_total_time_enabled);
+       }
+       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+               values[n++] = running +
+-                      atomic64_read(&event->child_total_time_running);
++                      atomic64_read_unchecked(&event->child_total_time_running);
+       }
+       if (read_format & PERF_FORMAT_ID)
+               values[n++] = primary_event_id(event);
+@@ -8562,8 +8570,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
+-              cpu_function_call(cpu,
+-                      (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
++              cpu_function_call(cpu, perf_mux_hrtimer_restart, cpuctx);
+       }
+       put_online_cpus();
+       mutex_unlock(&mux_interval_mutex);
+@@ -9004,7 +9011,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+       event->parent           = parent_event;
+       event->ns               = get_pid_ns(task_active_pid_ns(current));
+-      event->id               = atomic64_inc_return(&perf_event_id);
++      event->id               = atomic64_inc_return_unchecked(&perf_event_id);
+       event->state            = PERF_EVENT_STATE_INACTIVE;
+@@ -9395,6 +9402,11 @@ SYSCALL_DEFINE5(perf_event_open,
+       if (flags & ~PERF_FLAG_ALL)
+               return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++      if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
++              return -EACCES;
++#endif
++
+       err = perf_copy_attr(attr_uptr, &attr);
+       if (err)
+               return err;
+@@ -9912,10 +9924,10 @@ static void sync_child_event(struct perf_event *child_event,
+       /*
+        * Add back the child's count to the parent's count:
+        */
+-      atomic64_add(child_val, &parent_event->child_count);
+-      atomic64_add(child_event->total_time_enabled,
++      atomic64_add_unchecked(child_val, &parent_event->child_count);
++      atomic64_add_unchecked(child_event->total_time_enabled,
+                    &parent_event->child_total_time_enabled);
+-      atomic64_add(child_event->total_time_running,
++      atomic64_add_unchecked(child_event->total_time_running,
+                    &parent_event->child_total_time_running);
+ }
+diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
+index 3f8cb1e..83f0438 100644
+--- a/kernel/events/hw_breakpoint.c
++++ b/kernel/events/hw_breakpoint.c
+@@ -30,6 +30,7 @@
+  * This file contains the arch-independent routines.
+  */
++#include <linux/bug.h>
+ #include <linux/irqflags.h>
+ #include <linux/kallsyms.h>
+ #include <linux/notifier.h>
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 486fd78..96062d7 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -150,10 +150,10 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
+       return len;                                                     \
+ }
+-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                    \
++#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user)              \
+ static inline unsigned long                                           \
+ func_name(struct perf_output_handle *handle,                          \
+-        const void *buf, unsigned long len)                           \
++        const void user *buf, unsigned long len)                      \
+ __DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
+ static inline unsigned long
+@@ -172,7 +172,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
+       return 0;
+ }
+-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
++DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
+ static inline unsigned long
+ memcpy_skip(void *dst, const void *src, unsigned long n)
+@@ -180,7 +180,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
+       return 0;
+ }
+-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
++DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
+ #ifndef arch_perf_out_copy_user
+ #define arch_perf_out_copy_user arch_perf_out_copy_user
+@@ -198,7 +198,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
+ }
+ #endif
+-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
++DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
+ /* Callchain handling */
+ extern struct perf_callchain_entry *
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 8c50276..457c599 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1695,7 +1695,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
+ {
+       struct page *page;
+       uprobe_opcode_t opcode;
+-      int result;
++      long result;
+       pagefault_disable();
+       result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 091a78b..7d6001b 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -170,6 +170,10 @@ void release_task(struct task_struct *p)
+       struct task_struct *leader;
+       int zap_leader;
+ repeat:
++#ifdef CONFIG_NET
++      gr_del_task_from_ip_table(p);
++#endif
++
+       /* don't need to get the RCU readlock here - the process is dead and
+        * can't be modifying its own credentials. But shut RCU-lockdep up */
+       rcu_read_lock();
+@@ -525,9 +529,8 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
+       return NULL;
+ }
++static struct task_struct *find_child_reaper(struct task_struct *father) __must_hold(&tasklist_lock);
+ static struct task_struct *find_child_reaper(struct task_struct *father)
+-      __releases(&tasklist_lock)
+-      __acquires(&tasklist_lock)
+ {
+       struct pid_namespace *pid_ns = task_active_pid_ns(father);
+       struct task_struct *reaper = pid_ns->child_reaper;
+@@ -624,6 +627,8 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+  *    jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
+  */
+ static void forget_original_parent(struct task_struct *father,
++                                      struct list_head *dead) __must_hold(&tasklist_lock);
++static void forget_original_parent(struct task_struct *father,
+                                       struct list_head *dead)
+ {
+       struct task_struct *p, *t, *reaper;
+@@ -731,6 +736,15 @@ void do_exit(long code)
+       int group_dead;
+       TASKS_RCU(int tasks_rcu_i);
++      /*
++       * If do_exit is called because this processes oopsed, it's possible
++       * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
++       * continuing. Amongst other possible reasons, this is to prevent
++       * mm_release()->clear_child_tid() from writing to a user-controlled
++       * kernel address.
++       */
++      set_fs(USER_DS);
++
+       profile_task_exit(tsk);
+       kcov_task_exit(tsk);
+@@ -741,15 +755,6 @@ void do_exit(long code)
+       if (unlikely(!tsk->pid))
+               panic("Attempted to kill the idle task!");
+-      /*
+-       * If do_exit is called because this processes oopsed, it's possible
+-       * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
+-       * continuing. Amongst other possible reasons, this is to prevent
+-       * mm_release()->clear_child_tid() from writing to a user-controlled
+-       * kernel address.
+-       */
+-      set_fs(USER_DS);
+-
+       ptrace_event(PTRACE_EVENT_EXIT, code);
+       validate_creds_for_do_exit(tsk);
+@@ -812,6 +817,9 @@ void do_exit(long code)
+       tsk->exit_code = code;
+       taskstats_exit(tsk, group_dead);
++      gr_acl_handle_psacct(tsk, code);
++      gr_acl_handle_exit();
++
+       exit_mm(tsk);
+       if (group_dead)
+@@ -926,7 +934,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
+  * Take down every thread in the group.  This is called by fatal signals
+  * as well as by sys_exit_group (below).
+  */
+-void
++__noreturn void
+ do_group_exit(int exit_code)
+ {
+       struct signal_struct *sig = current->signal;
+@@ -1054,6 +1062,7 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
+  * the lock and this task is uninteresting.  If we return nonzero, we have
+  * released the lock and the system call should return.
+  */
++static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) __must_hold(&tasklist_lock);
+ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+ {
+       int state, retval, status;
+@@ -1070,6 +1079,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+               get_task_struct(p);
+               read_unlock(&tasklist_lock);
++              __acquire(&tasklist_lock); // XXX sparse can't model conditional release
+               sched_annotate_sleep();
+               if ((exit_code & 0x7f) == 0) {
+@@ -1092,6 +1102,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+        * We own this thread, nobody else can reap it.
+        */
+       read_unlock(&tasklist_lock);
++      __acquire(&tasklist_lock); // XXX sparse can't model conditional release
+       sched_annotate_sleep();
+       /*
+@@ -1234,6 +1245,8 @@ static int *task_stopped_code(struct task_struct *p, bool ptrace)
+  * search should terminate.
+  */
+ static int wait_task_stopped(struct wait_opts *wo,
++                              int ptrace, struct task_struct *p) __must_hold(&tasklist_lock);
++static int wait_task_stopped(struct wait_opts *wo,
+                               int ptrace, struct task_struct *p)
+ {
+       struct siginfo __user *infop;
+@@ -1281,6 +1294,7 @@ unlock_sig:
+       pid = task_pid_vnr(p);
+       why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
+       read_unlock(&tasklist_lock);
++      __acquire(&tasklist_lock); // XXX sparse can't model conditional release
+       sched_annotate_sleep();
+       if (unlikely(wo->wo_flags & WNOWAIT))
+@@ -1318,6 +1332,7 @@ unlock_sig:
+  * the lock and this task is uninteresting.  If we return nonzero, we have
+  * released the lock and the system call should return.
+  */
++static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) __must_hold(&tasklist_lock);
+ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
+ {
+       int retval;
+@@ -1344,6 +1359,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
+       pid = task_pid_vnr(p);
+       get_task_struct(p);
+       read_unlock(&tasklist_lock);
++      __acquire(&tasklist_lock); // XXX sparse can't model conditional release
+       sched_annotate_sleep();
+       if (!wo->wo_info) {
+@@ -1373,6 +1389,8 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
+  * or another error from security_task_wait(), or still -ECHILD.
+  */
+ static int wait_consider_task(struct wait_opts *wo, int ptrace,
++                              struct task_struct *p) __must_hold(&tasklist_lock);
++static int wait_consider_task(struct wait_opts *wo, int ptrace,
+                               struct task_struct *p)
+ {
+       /*
+@@ -1498,6 +1516,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
+  * ->notask_error is 0 if there were any eligible children,
+  * or another error from security_task_wait(), or still -ECHILD.
+  */
++static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) __must_hold(&tasklist_lock);
+ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
+ {
+       struct task_struct *p;
+@@ -1512,6 +1531,7 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
+       return 0;
+ }
++static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) __must_hold(&tasklist_lock);
+ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
+ {
+       struct task_struct *p;
+@@ -1575,12 +1595,16 @@ repeat:
+       tsk = current;
+       do {
+               retval = do_wait_thread(wo, tsk);
+-              if (retval)
++              if (retval) {
++                      __release(&tasklist_lock); // XXX sparse can't model conditional release
+                       goto end;
++              }
+               retval = ptrace_do_wait(wo, tsk);
+-              if (retval)
++              if (retval) {
++                      __release(&tasklist_lock); // XXX sparse can't model conditional release
+                       goto end;
++              }
+               if (wo->wo_flags & __WNOTHREAD)
+                       break;
+diff --git a/kernel/extable.c b/kernel/extable.c
+index e820cce..72195de 100644
+--- a/kernel/extable.c
++++ b/kernel/extable.c
+@@ -23,6 +23,7 @@
+ #include <asm/sections.h>
+ #include <asm/uaccess.h>
++#include <asm/setup.h>
+ /*
+  * mutex protecting text section modification (dynamic code patching).
+@@ -41,10 +42,22 @@ u32 __initdata __visible main_extable_sort_needed = 1;
+ /* Sort the kernel's built-in exception table */
+ void __init sort_main_extable(void)
+ {
+-      if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
++      struct exception_table_entry *start = __start___ex_table;
++
++      if (main_extable_sort_needed && __stop___ex_table > start) {
+               pr_notice("Sorting __ex_table...\n");
+-              sort_extable(__start___ex_table, __stop___ex_table);
++              sort_extable(start, __stop___ex_table);
+       }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      while (start < __stop___ex_table) {
++              start->insn -= kaslr_offset();
++              start->fixup -= kaslr_offset();
++              start->handler -= kaslr_offset();
++              start++;
++      }
++#endif
++
+ }
+ /* Given an address, look for it in the exception tables. */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index beb3172..c13f974 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -188,13 +188,56 @@ static void free_thread_stack(unsigned long *stack)
+ void thread_stack_cache_init(void)
+ {
+-      thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
+-                                            THREAD_SIZE, 0, NULL);
++      thread_stack_cache = kmem_cache_create_usercopy("thread_stack", THREAD_SIZE,
++                                            THREAD_SIZE, 0, 0, THREAD_SIZE, NULL);
+       BUG_ON(thread_stack_cache == NULL);
+ }
+ # endif
+ #endif
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static inline unsigned long *gr_alloc_thread_stack_node(struct task_struct *tsk,
++                                                int node, void **lowmem_stack)
++{
++      struct page *pages[THREAD_SIZE / PAGE_SIZE];
++      void *ret = NULL;
++      unsigned int i;
++
++      *lowmem_stack = alloc_thread_stack_node(tsk, node);
++      if (*lowmem_stack == NULL)
++              goto out;
++
++      for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
++              pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
++      
++      /* use VM_IOREMAP to gain THREAD_SIZE alignment */
++      ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
++      if (ret == NULL) {
++              free_thread_stack(*lowmem_stack);
++              *lowmem_stack = NULL;
++      } else
++              populate_stack(ret, THREAD_SIZE);
++
++out:
++      return ret;
++}
++
++static inline void gr_free_thread_stack(struct task_struct *tsk, unsigned long *stack)
++{
++      unmap_process_stacks(tsk);
++}
++#else
++static inline unsigned long *gr_alloc_thread_stack_node(struct task_struct *tsk,
++                                                int node, void **lowmem_stack)
++{
++      return alloc_thread_stack_node(tsk, node);
++}
++static inline void gr_free_thread_stack(struct task_struct *tsk, unsigned long *stack)
++{
++      free_thread_stack(stack);
++}
++#endif
++
+ /* SLAB cache for signal_struct structures (tsk->signal) */
+ static struct kmem_cache *signal_cachep;
+@@ -213,10 +256,14 @@ struct kmem_cache *vm_area_cachep;
+ /* SLAB cache for mm_struct structures (tsk->mm) */
+ static struct kmem_cache *mm_cachep;
+-static void account_kernel_stack(unsigned long *stack, int account)
++static void account_kernel_stack(struct task_struct *tsk, unsigned long *stack, int account)
+ {
+       /* All stack pages are in the same zone and belong to the same memcg. */
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      struct page *first_page = virt_to_page(tsk->lowmem_stack);
++#else
+       struct page *first_page = virt_to_page(stack);
++#endif
+       mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
+                           THREAD_SIZE / 1024 * account);
+@@ -228,9 +275,9 @@ static void account_kernel_stack(unsigned long *stack, int account)
+ void free_task(struct task_struct *tsk)
+ {
+-      account_kernel_stack(tsk->stack, -1);
++      account_kernel_stack(tsk, tsk->stack, -1);
+       arch_release_thread_stack(tsk->stack);
+-      free_thread_stack(tsk->stack);
++      gr_free_thread_stack(tsk, tsk->stack);
+       rt_mutex_debug_task_free(tsk);
+       ftrace_graph_exit_task(tsk);
+       put_seccomp_filter(tsk);
+@@ -297,7 +344,7 @@ static void set_max_threads(unsigned int max_threads_suggested)
+ #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ /* Initialized by the architecture: */
+-int arch_task_struct_size __read_mostly;
++size_t arch_task_struct_size __read_mostly;
+ #endif
+ void __init fork_init(void)
+@@ -307,9 +354,12 @@ void __init fork_init(void)
+ #define ARCH_MIN_TASKALIGN    L1_CACHE_BYTES
+ #endif
+       /* create a slab on which task_structs can be allocated */
+-      task_struct_cachep = kmem_cache_create("task_struct",
++      task_struct_cachep = kmem_cache_create_usercopy("task_struct",
+                       arch_task_struct_size, ARCH_MIN_TASKALIGN,
+-                      SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
++                      SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
++                      offsetof(struct task_struct, blocked),
++                      sizeof(init_task.blocked) + sizeof(init_task.saved_sigmask),
++                      NULL);
+ #endif
+       /* do the arch specific task caches init */
+@@ -342,6 +392,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ {
+       struct task_struct *tsk;
+       unsigned long *stack;
++      void *lowmem_stack;
+       int err;
+       if (node == NUMA_NO_NODE)
+@@ -350,7 +401,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+       if (!tsk)
+               return NULL;
+-      stack = alloc_thread_stack_node(tsk, node);
++      stack = gr_alloc_thread_stack_node(tsk, node, &lowmem_stack);
+       if (!stack)
+               goto free_tsk;
+@@ -359,6 +410,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+               goto free_stack;
+       tsk->stack = stack;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      tsk->lowmem_stack = lowmem_stack;
++#endif
++
+ #ifdef CONFIG_SECCOMP
+       /*
+        * We must handle setting up seccomp filters once we're under
+@@ -375,7 +430,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+       set_task_stack_end_magic(tsk);
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-      tsk->stack_canary = get_random_int();
++      tsk->stack_canary = pax_get_random_long();
+ #endif
+       /*
+@@ -390,26 +445,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+       tsk->task_frag.page = NULL;
+       tsk->wake_q.next = NULL;
+-      account_kernel_stack(stack, 1);
++      account_kernel_stack(tsk, stack, 1);
+       kcov_task_init(tsk);
+       return tsk;
+ free_stack:
+-      free_thread_stack(stack);
++      gr_free_thread_stack(tsk, stack);
+ free_tsk:
+       free_task_struct(tsk);
+       return NULL;
+ }
+ #ifdef CONFIG_MMU
+-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
++{
++      struct vm_area_struct *tmp;
++      unsigned long charge;
++      struct file *file;
++      int retval;
++
++      charge = 0;
++      if (mpnt->vm_flags & VM_ACCOUNT) {
++              unsigned long len = vma_pages(mpnt);
++
++              if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
++                      goto fail_nomem;
++              charge = len;
++      }
++      tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++      if (!tmp)
++              goto fail_nomem;
++      *tmp = *mpnt;
++      tmp->vm_mm = mm;
++      INIT_LIST_HEAD(&tmp->anon_vma_chain);
++      retval = vma_dup_policy(mpnt, tmp);
++      if (retval)
++              goto fail_nomem_policy;
++      if (anon_vma_fork(tmp, mpnt))
++              goto fail_nomem_anon_vma_fork;
++      tmp->vm_flags &= ~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
++      tmp->vm_next = tmp->vm_prev = NULL;
++      tmp->vm_mirror = NULL;
++      tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
++      file = tmp->vm_file;
++      if (file) {
++              struct inode *inode = file_inode(file);
++              struct address_space *mapping = file->f_mapping;
++
++              get_file(file);
++              if (tmp->vm_flags & VM_DENYWRITE)
++                      atomic_dec(&inode->i_writecount);
++              i_mmap_lock_write(mapping);
++              if (tmp->vm_flags & VM_SHARED)
++                      atomic_inc(&mapping->i_mmap_writable);
++              flush_dcache_mmap_lock(mapping);
++              /* insert tmp into the share list, just after mpnt */
++              vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
++              flush_dcache_mmap_unlock(mapping);
++              i_mmap_unlock_write(mapping);
++      }
++
++      /*
++       * Clear hugetlb-related page reserves for children. This only
++       * affects MAP_PRIVATE mappings. Faults generated by the child
++       * are not guaranteed to succeed, even if read-only
++       */
++      if (is_vm_hugetlb_page(tmp))
++              reset_vma_resv_huge_pages(tmp);
++
++      return tmp;
++
++fail_nomem_anon_vma_fork:
++      mpol_put(vma_policy(tmp));
++fail_nomem_policy:
++      kmem_cache_free(vm_area_cachep, tmp);
++fail_nomem:
++      vm_unacct_memory(charge);
++      return NULL;
++}
++
++static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+       struct rb_node **rb_link, *rb_parent;
+       int retval;
+-      unsigned long charge;
+       uprobe_start_dup_mmap();
+       if (down_write_killable(&oldmm->mmap_sem)) {
+@@ -443,52 +564,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+       prev = NULL;
+       for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+-              struct file *file;
+-
+               if (mpnt->vm_flags & VM_DONTCOPY) {
+                       vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
+                       continue;
+               }
+-              charge = 0;
+-              if (mpnt->vm_flags & VM_ACCOUNT) {
+-                      unsigned long len = vma_pages(mpnt);
+-
+-                      if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
+-                              goto fail_nomem;
+-                      charge = len;
+-              }
+-              tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+-              if (!tmp)
+-                      goto fail_nomem;
+-              *tmp = *mpnt;
+-              INIT_LIST_HEAD(&tmp->anon_vma_chain);
+-              retval = vma_dup_policy(mpnt, tmp);
+-              if (retval)
+-                      goto fail_nomem_policy;
+-              tmp->vm_mm = mm;
+-              if (anon_vma_fork(tmp, mpnt))
+-                      goto fail_nomem_anon_vma_fork;
+-              tmp->vm_flags &=
+-                      ~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
+-              tmp->vm_next = tmp->vm_prev = NULL;
+-              tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+-              file = tmp->vm_file;
+-              if (file) {
+-                      struct inode *inode = file_inode(file);
+-                      struct address_space *mapping = file->f_mapping;
+-
+-                      get_file(file);
+-                      if (tmp->vm_flags & VM_DENYWRITE)
+-                              atomic_dec(&inode->i_writecount);
+-                      i_mmap_lock_write(mapping);
+-                      if (tmp->vm_flags & VM_SHARED)
+-                              atomic_inc(&mapping->i_mmap_writable);
+-                      flush_dcache_mmap_lock(mapping);
+-                      /* insert tmp into the share list, just after mpnt */
+-                      vma_interval_tree_insert_after(tmp, mpnt,
+-                                      &mapping->i_mmap);
+-                      flush_dcache_mmap_unlock(mapping);
+-                      i_mmap_unlock_write(mapping);
++              tmp = dup_vma(mm, oldmm, mpnt);
++              if (!tmp) {
++                      retval = -ENOMEM;
++                      goto out;
+               }
+               /*
+@@ -520,6 +603,38 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+               if (retval)
+                       goto out;
+       }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
++              struct vm_area_struct *mpnt_m;
++
++              for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next) {
++                      if (mpnt->vm_flags & VM_DONTCOPY)
++                              continue;
++
++                      BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
++
++                      if (!mpnt->vm_mirror) {
++                              mpnt_m = mpnt_m->vm_next;
++                              continue;
++                      }
++
++                      if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
++                              BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
++                              mpnt->vm_mirror = mpnt_m;
++                      } else {
++                              BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
++                              mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
++                              mpnt_m->vm_mirror->vm_mirror = mpnt_m;
++                              mpnt->vm_mirror->vm_mirror = mpnt;
++                      }
++
++                      mpnt_m = mpnt_m->vm_next;
++              }
++              BUG_ON(mpnt_m);
++      }
++#endif
++
+       /* a new mm has just been created */
+       arch_dup_mmap(oldmm, mm);
+       retval = 0;
+@@ -530,14 +645,6 @@ out:
+ fail_uprobe_end:
+       uprobe_end_dup_mmap();
+       return retval;
+-fail_nomem_anon_vma_fork:
+-      mpol_put(vma_policy(tmp));
+-fail_nomem_policy:
+-      kmem_cache_free(vm_area_cachep, tmp);
+-fail_nomem:
+-      retval = -ENOMEM;
+-      vm_unacct_memory(charge);
+-      goto out;
+ }
+ static inline int mm_alloc_pgd(struct mm_struct *mm)
+@@ -857,8 +964,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+               return ERR_PTR(err);
+       mm = get_task_mm(task);
+-      if (mm && mm != current->mm &&
+-                      !ptrace_may_access(task, mode)) {
++      if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
++                ((mode & PTRACE_MODE_ATTACH) && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
+               mmput(mm);
+               mm = ERR_PTR(-EACCES);
+       }
+@@ -1057,13 +1164,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+                       spin_unlock(&fs->lock);
+                       return -EAGAIN;
+               }
+-              fs->users++;
++              atomic_inc(&fs->users);
+               spin_unlock(&fs->lock);
+               return 0;
+       }
+       tsk->fs = copy_fs_struct(fs);
+       if (!tsk->fs)
+               return -ENOMEM;
++      /* Carry through gr_chroot_dentry and is_chrooted instead
++         of recomputing it here.  Already copied when the task struct
++         is duplicated.  This allows pivot_root to not be treated as
++         a chroot
++      */
++      //gr_set_chroot_entries(tsk, &tsk->fs->root);
++
+       return 0;
+ }
+@@ -1296,7 +1410,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
+  * parts of the process environment (as per the clone
+  * flags). The actual kick-off is left to the caller.
+  */
+-static struct task_struct *copy_process(unsigned long clone_flags,
++static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
+                                       unsigned long stack_start,
+                                       unsigned long stack_size,
+                                       int __user *child_tidptr,
+@@ -1368,6 +1482,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+       DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
+       retval = -EAGAIN;
++
++      gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
++
+       if (atomic_read(&p->real_cred->user->processes) >=
+                       task_rlimit(p, RLIMIT_NPROC)) {
+               if (p->real_cred->user != INIT_USER &&
+@@ -1626,6 +1743,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+               goto bad_fork_cancel_cgroup;
+       }
++      /* synchronizes with gr_set_acls()
++         we need to call this past the point of no return for fork()
++      */
++      gr_copy_label(p);
++
++#ifdef CONFIG_GRKERNSEC_SETXID
++      if (p->delayed_cred)
++              get_cred(p->delayed_cred);
++#endif
++
+       if (likely(p->pid)) {
+               ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
+@@ -1717,6 +1844,8 @@ bad_fork_cleanup_count:
+ bad_fork_free:
+       free_task(p);
+ fork_out:
++      gr_log_forkfail(retval);
++
+       return ERR_PTR(retval);
+ }
+@@ -1780,6 +1909,7 @@ long _do_fork(unsigned long clone_flags,
+       p = copy_process(clone_flags, stack_start, stack_size,
+                        child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
++      add_latent_entropy();
+       /*
+        * Do this prior waking up the new thread - the thread pointer
+        * might get invalid after that point, if the thread exits quickly.
+@@ -1796,6 +1926,8 @@ long _do_fork(unsigned long clone_flags,
+               if (clone_flags & CLONE_PARENT_SETTID)
+                       put_user(nr, parent_tidptr);
++              gr_handle_brute_check();
++
+               if (clone_flags & CLONE_VFORK) {
+                       p->vfork_done = &vfork;
+                       init_completion(&vfork);
+@@ -1928,11 +2060,12 @@ void __init proc_caches_init(void)
+        * maximum number of CPU's we can ever have.  The cpumask_allocation
+        * is at the end of the structure, exactly for that reason.
+        */
+-      mm_cachep = kmem_cache_create("mm_struct",
++      mm_cachep = kmem_cache_create_usercopy("mm_struct",
+                       sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
++                      offsetof(struct mm_struct, saved_auxv), sizeof(init_mm.saved_auxv),
+                       NULL);
+-      vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
++      vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE);
+       mmap_init();
+       nsproxy_cache_init();
+ }
+@@ -1980,7 +2113,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+               return 0;
+       /* don't need lock here; in the worst case we'll do useless copy */
+-      if (fs->users == 1)
++      if (atomic_read(&fs->users) == 1)
+               return 0;
+       *new_fsp = copy_fs_struct(fs);
+@@ -2093,7 +2226,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+                       fs = current->fs;
+                       spin_lock(&fs->lock);
+                       current->fs = new_fs;
+-                      if (--fs->users)
++                      gr_set_chroot_entries(current, &current->fs->root);
++                      if (atomic_dec_return(&fs->users))
+                               new_fs = NULL;
+                       else
+                               new_fs = fs;
+@@ -2157,7 +2291,7 @@ int unshare_files(struct files_struct **displaced)
+ int sysctl_max_threads(struct ctl_table *table, int write,
+                      void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       int ret;
+       int threads = max_threads;
+       int min = MIN_THREADS;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 46cb3a3..96207f8 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -210,7 +210,7 @@ struct futex_pi_state {
+       atomic_t refcount;
+       union futex_key key;
+-};
++} __randomize_layout;
+ /**
+  * struct futex_q - The hashed futex queue entry, one per waiting task
+@@ -244,7 +244,7 @@ struct futex_q {
+       struct rt_mutex_waiter *rt_waiter;
+       union futex_key *requeue_pi_key;
+       u32 bitset;
+-};
++} __randomize_layout;
+ static const struct futex_q futex_q_init = {
+       /* list gets initialized in queue_me()*/
+@@ -494,6 +494,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+       struct address_space *mapping;
+       int err, ro = 0;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
++              return -EFAULT;
++#endif
++
+       /*
+        * The futex address must be "naturally" aligned.
+        */
+@@ -3270,6 +3275,7 @@ static void __init futex_detect_cmpxchg(void)
+ {
+ #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+       u32 curval;
++      mm_segment_t oldfs;
+       /*
+        * This will fail and we want it. Some arch implementations do
+@@ -3281,8 +3287,11 @@ static void __init futex_detect_cmpxchg(void)
+        * implementation, the non-functional ones will return
+        * -ENOSYS.
+        */
++      oldfs = get_fs();
++      set_fs(USER_DS);
+       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+               futex_cmpxchg_enabled = 1;
++      set_fs(oldfs);
+ #endif
+ }
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index 4ae3232..5adee02 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+       return 0;
+ }
+-static void __user *futex_uaddr(struct robust_list __user *entry,
++static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
+                               compat_long_t futex_offset)
+ {
+       compat_uptr_t base = ptr_to_compat(entry);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 9530fcd..7f3a521 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -972,7 +972,7 @@ static int irq_thread(void *data)
+               action_ret = handler_fn(desc, action);
+               if (action_ret == IRQ_HANDLED)
+-                      atomic_inc(&desc->threads_handled);
++                      atomic_inc_unchecked(&desc->threads_handled);
+               if (action_ret == IRQ_WAKE_THREAD)
+                       irq_wake_secondary(desc, action);
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 19e9dfb..0766454 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -214,16 +214,18 @@ static void msi_domain_update_dom_ops(struct msi_domain_info *info)
+               return;
+       }
++      pax_open_kernel();
+       if (ops->get_hwirq == NULL)
+-              ops->get_hwirq = msi_domain_ops_default.get_hwirq;
++              const_cast(ops->get_hwirq) = msi_domain_ops_default.get_hwirq;
+       if (ops->msi_init == NULL)
+-              ops->msi_init = msi_domain_ops_default.msi_init;
++              const_cast(ops->msi_init) = msi_domain_ops_default.msi_init;
+       if (ops->msi_check == NULL)
+-              ops->msi_check = msi_domain_ops_default.msi_check;
++              const_cast(ops->msi_check) = msi_domain_ops_default.msi_check;
+       if (ops->msi_prepare == NULL)
+-              ops->msi_prepare = msi_domain_ops_default.msi_prepare;
++              const_cast(ops->msi_prepare) = msi_domain_ops_default.msi_prepare;
+       if (ops->set_desc == NULL)
+-              ops->set_desc = msi_domain_ops_default.set_desc;
++              const_cast(ops->set_desc) = msi_domain_ops_default.set_desc;
++      pax_close_kernel();
+ }
+ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
+@@ -231,8 +233,11 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
+       struct irq_chip *chip = info->chip;
+       BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
+-      if (!chip->irq_set_affinity)
+-              chip->irq_set_affinity = msi_domain_set_affinity;
++      if (!chip->irq_set_affinity) {
++              pax_open_kernel();
++              const_cast(chip->irq_set_affinity) = msi_domain_set_affinity;
++              pax_close_kernel();
++      }
+ }
+ /**
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 5707f97..d526a3d 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -334,7 +334,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
+                        * count. We just care about the count being
+                        * different than the one we saw before.
+                        */
+-                      handled = atomic_read(&desc->threads_handled);
++                      handled = atomic_read_unchecked(&desc->threads_handled);
+                       handled |= SPURIOUS_DEFERRED;
+                       if (handled != desc->threads_handled_last) {
+                               action_ret = IRQ_HANDLED;
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 93ad6c1..139ea2a 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -15,6 +15,7 @@
+ #include <linux/static_key.h>
+ #include <linux/jump_label_ratelimit.h>
+ #include <linux/bug.h>
++#include <linux/mm.h>
+ #ifdef HAVE_JUMP_LABEL
+@@ -52,7 +53,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
+       size = (((unsigned long)stop - (unsigned long)start)
+                                       / sizeof(struct jump_entry));
++      pax_open_kernel();
+       sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
++      pax_close_kernel();
+ }
+ static void jump_label_update(struct static_key *key);
+@@ -475,10 +478,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
+       struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+       struct jump_entry *iter;
++      pax_open_kernel();
+       for (iter = iter_start; iter < iter_stop; iter++) {
+               if (within_module_init(iter->code, mod))
+                       iter->code = 0;
+       }
++      pax_close_kernel();
+ }
+ static int
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index fafd1a3..316983e 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -11,6 +11,9 @@
+  *      Changed the compression method from stem compression to "table lookup"
+  *      compression (see scripts/kallsyms.c for a more complete description)
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -58,12 +61,33 @@ extern const unsigned long kallsyms_markers[] __weak;
+ static inline int is_kernel_inittext(unsigned long addr)
+ {
++      if (system_state != SYSTEM_BOOTING)
++              return 0;
++
+       if (addr >= (unsigned long)_sinittext
+           && addr <= (unsigned long)_einittext)
+               return 1;
+       return 0;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#ifdef CONFIG_MODULES
++static inline int is_module_text(unsigned long addr)
++{
++      if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
++              return 1;
++
++      addr = ktla_ktva(addr);
++      return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
++}
++#else
++static inline int is_module_text(unsigned long addr)
++{
++      return 0;
++}
++#endif
++#endif
++
+ static inline int is_kernel_text(unsigned long addr)
+ {
+       if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+@@ -74,13 +98,28 @@ static inline int is_kernel_text(unsigned long addr)
+ static inline int is_kernel(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (is_kernel_text(addr) || is_kernel_inittext(addr))
++              return 1;
++
++      if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
++#else
+       if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
++#endif
++
+               return 1;
+       return in_gate_area_no_mm(addr);
+ }
+ static int is_ksym_addr(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (is_module_text(addr))
++              return 0;
++#endif
++
+       if (all_var)
+               return is_kernel(addr);
+@@ -458,10 +497,11 @@ int sprint_backtrace(char *buffer, unsigned long address)
+ }
+ /* Look up a kernel symbol and print it to the kernel messages. */
+-void __print_symbol(const char *fmt, unsigned long address)
++void __print_symbol(const char *fmt, unsigned long address, ...)
+ {
+       char buffer[KSYM_SYMBOL_LEN];
++      address = (unsigned long)__builtin_extract_return_addr((void *)address);
+       sprint_symbol(buffer, address);
+       printk(fmt, buffer);
+@@ -505,7 +545,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
+ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
+ {
+-      iter->name[0] = '\0';
+       iter->nameoff = get_symbol_offset(new_pos);
+       iter->pos = new_pos;
+ }
+@@ -553,6 +592,11 @@ static int s_show(struct seq_file *m, void *p)
+ {
+       struct kallsym_iter *iter = m->private;
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
++              return 0;
++#endif
++
+       /* Some debugging symbols have no name.  Ignore them. */
+       if (!iter->name[0])
+               return 0;
+@@ -566,6 +610,7 @@ static int s_show(struct seq_file *m, void *p)
+                */
+               type = iter->exported ? toupper(iter->type) :
+                                       tolower(iter->type);
++
+               seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
+                          type, iter->name, iter->module_name);
+       } else
+diff --git a/kernel/kcmp.c b/kernel/kcmp.c
+index 3a47fa9..bcb17e3 100644
+--- a/kernel/kcmp.c
++++ b/kernel/kcmp.c
+@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+       struct task_struct *task1, *task2;
+       int ret;
++#ifdef CONFIG_GRKERNSEC
++      return -ENOSYS;
++#endif
++
+       rcu_read_lock();
+       /*
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 980936a..81408fd 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -236,7 +236,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
+                      compat_ulong_t, flags)
+ {
+       struct compat_kexec_segment in;
+-      struct kexec_segment out, __user *ksegments;
++      struct kexec_segment out;
++      struct kexec_segment __user *ksegments;
+       unsigned long i, result;
+       /* Don't allow clients that don't understand the native
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index 5616755..0affaae 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -871,7 +871,7 @@ int kimage_load_segment(struct kimage *image,
+ struct kimage *kexec_image;
+ struct kimage *kexec_crash_image;
+-int kexec_load_disabled;
++int kexec_load_disabled __read_only;
+ /*
+  * No panic_cpu check version of crash_kexec().  This function is called
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index 0277d12..2d2899c 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
+       kfree(info->argv);
+ }
+-static int call_modprobe(char *module_name, int wait)
++static int call_modprobe(char *module_name, char *module_param, int wait)
+ {
+       struct subprocess_info *info;
+       static char *envp[] = {
+@@ -76,7 +76,7 @@ static int call_modprobe(char *module_name, int wait)
+               NULL
+       };
+-      char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
++      char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
+       if (!argv)
+               goto out;
+@@ -88,7 +88,8 @@ static int call_modprobe(char *module_name, int wait)
+       argv[1] = "-q";
+       argv[2] = "--";
+       argv[3] = module_name;  /* check free_modprobe_argv() */
+-      argv[4] = NULL;
++      argv[4] = module_param;
++      argv[5] = NULL;
+       info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
+                                        NULL, free_modprobe_argv, NULL);
+@@ -121,9 +122,8 @@ out:
+  * If module auto-loading support is disabled then this function
+  * becomes a no-operation.
+  */
+-int __request_module(bool wait, const char *fmt, ...)
++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
+ {
+-      va_list args;
+       char module_name[MODULE_NAME_LEN];
+       unsigned int max_modprobes;
+       int ret;
+@@ -142,9 +142,7 @@ int __request_module(bool wait, const char *fmt, ...)
+       if (!modprobe_path[0])
+               return 0;
+-      va_start(args, fmt);
+-      ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
+-      va_end(args);
++      ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
+       if (ret >= MODULE_NAME_LEN)
+               return -ENAMETOOLONG;
+@@ -152,6 +150,20 @@ int __request_module(bool wait, const char *fmt, ...)
+       if (ret)
+               return ret;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
++              /* hack to workaround consolekit/udisks stupidity */
++              read_lock(&tasklist_lock);
++              if (!strcmp(current->comm, "mount") &&
++                  current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
++                      read_unlock(&tasklist_lock);
++                      printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
++                      return -EPERM;
++              }
++              read_unlock(&tasklist_lock);
++      }
++#endif
++
+       /* If modprobe needs a service that is in a module, we get a recursive
+        * loop.  Limit the number of running kmod threads to max_threads/2 or
+        * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
+@@ -180,16 +192,61 @@ int __request_module(bool wait, const char *fmt, ...)
+       trace_module_request(module_name, wait, _RET_IP_);
+-      ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
++      ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+       atomic_dec(&kmod_concurrent);
+       return ret;
+ }
++
++int ___request_module(bool wait, char *module_param, const char *fmt, ...)
++{
++      va_list args;
++      int ret;
++
++      va_start(args, fmt);
++      ret = ____request_module(wait, module_param, fmt, args);
++      va_end(args);
++
++      return ret;
++}
++
++int __request_module(bool wait, const char *fmt, ...)
++{
++      va_list args;
++      int ret;
++
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
++              char module_param[MODULE_NAME_LEN];
++
++              memset(module_param, 0, sizeof(module_param));
++
++              snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
++
++              va_start(args, fmt);
++              ret = ____request_module(wait, module_param, fmt, args);
++              va_end(args);
++
++              return ret;
++      }
++#endif
++
++      va_start(args, fmt);
++      ret = ____request_module(wait, NULL, fmt, args);
++      va_end(args);
++
++      return ret;
++}
++
+ EXPORT_SYMBOL(__request_module);
+ #endif /* CONFIG_MODULES */
+ static void call_usermodehelper_freeinfo(struct subprocess_info *info)
+ {
++#ifdef CONFIG_GRKERNSEC
++      kfree(info->path);
++      info->path = info->origpath;
++#endif
+       if (info->cleanup)
+               (*info->cleanup)(info);
+       kfree(info);
+@@ -228,6 +285,22 @@ static int call_usermodehelper_exec_async(void *data)
+        */
+       set_user_nice(current, 0);
++#ifdef CONFIG_GRKERNSEC
++      /* this is race-free as far as userland is concerned as we copied
++         out the path to be used prior to this point and are now operating
++         on that copy
++      */
++      if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
++           strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
++           strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
++           strncmp(sub_info->path, "/usr/sbin/", 10) && strcmp(sub_info->path, "/bin/false") && 
++           strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
++              printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
++              retval = -EPERM;
++              goto out;
++      }
++#endif
++
+       retval = -ENOMEM;
+       new = prepare_kernel_cred(current);
+       if (!new)
+@@ -250,8 +323,8 @@ static int call_usermodehelper_exec_async(void *data)
+       commit_creds(new);
+       retval = do_execve(getname_kernel(sub_info->path),
+-                         (const char __user *const __user *)sub_info->argv,
+-                         (const char __user *const __user *)sub_info->envp);
++                         (const char __user *const __force_user *)sub_info->argv,
++                         (const char __user *const __force_user *)sub_info->envp);
+ out:
+       sub_info->retval = retval;
+       /*
+@@ -287,7 +360,7 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
+                *
+                * Thus the __user pointer cast is valid here.
+                */
+-              sys_wait4(pid, (int __user *)&ret, 0, NULL);
++              sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
+               /*
+                * If ret is 0, either call_usermodehelper_exec_async failed and
+@@ -528,7 +601,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
+               goto out;
+       INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
++#ifdef CONFIG_GRKERNSEC
++      sub_info->origpath = path;
++      sub_info->path = kstrdup(path, gfp_mask);
++#else
+       sub_info->path = path;
++#endif
+       sub_info->argv = argv;
+       sub_info->envp = envp;
+@@ -630,7 +708,7 @@ EXPORT_SYMBOL(call_usermodehelper);
+ static int proc_cap_handler(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
+       kernel_cap_t new_cap;
+       int err, i;
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index d10ab6b..1725fbd 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -31,6 +31,9 @@
+  *            <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+  *            <prasanna@in.ibm.com> added function-return probes.
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kprobes.h>
+ #include <linux/hash.h>
+ #include <linux/init.h>
+@@ -122,12 +125,12 @@ enum kprobe_slot_state {
+ static void *alloc_insn_page(void)
+ {
+-      return module_alloc(PAGE_SIZE);
++      return module_alloc_exec(PAGE_SIZE);
+ }
+ static void free_insn_page(void *page)
+ {
+-      module_memfree(page);
++      module_memfree_exec(page);
+ }
+ struct kprobe_insn_cache kprobe_insn_slots = {
+@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
+               kprobe_type = "k";
+       if (sym)
+-              seq_printf(pi, "%p  %s  %s+0x%x  %s ",
++              seq_printf(pi, "%pK  %s  %s+0x%x  %s ",
+                       p->addr, kprobe_type, sym, offset,
+                       (modname ? modname : " "));
+       else
+-              seq_printf(pi, "%p  %s  %p ",
++              seq_printf(pi, "%pK  %s  %pK ",
+                       p->addr, kprobe_type, p->addr);
+       if (!pp)
+@@ -2291,7 +2294,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
+       struct kprobe_blacklist_entry *ent =
+               list_entry(v, struct kprobe_blacklist_entry, list);
+-      seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
++      seq_printf(m, "0x%pK-0x%pK\t%ps\n", (void *)ent->start_addr,
+                  (void *)ent->end_addr, (void *)ent->start_addr);
+       return 0;
+ }
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index ee1bc1b..a351806 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
+ {
+       if (count+1 > UEVENT_HELPER_PATH_LEN)
+               return -ENOENT;
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
+       memcpy(uevent_helper, buf, count);
+       uevent_helper[count] = '\0';
+       if (count && uevent_helper[count-1] == '\n')
+@@ -195,7 +197,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
+       return count;
+ }
+-static struct bin_attribute notes_attr = {
++static bin_attribute_no_const notes_attr __read_only = {
+       .attr = {
+               .name = "notes",
+               .mode = S_IRUGO,
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 589d763..3962223 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -603,6 +603,10 @@ static int static_obj(void *obj)
+                     end   = (unsigned long) &_end,
+                     addr  = (unsigned long) obj;
++#ifdef CONFIG_PAX_KERNEXEC
++      start = ktla_ktva(start);
++#endif
++
+       /*
+        * static variable?
+        */
+@@ -733,6 +737,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+       if (!static_obj(lock->key)) {
+               debug_locks_off();
+               printk("INFO: trying to register non-static key.\n");
++              printk("lock:%pS key:%pS.\n", lock, lock->key);
+               printk("the code is fine but needs lockdep annotation.\n");
+               printk("turning off the locking correctness validator.\n");
+               dump_stack();
+@@ -3231,7 +3236,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+               if (!class)
+                       return 0;
+       }
+-      atomic_inc((atomic_t *)&class->ops);
++      atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
+       if (very_verbose(class)) {
+               printk("\nacquire class [%p] %s", class->key, class->name);
+               if (class->name_version > 1)
+diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
+index a0f61ef..b6aef3c 100644
+--- a/kernel/locking/lockdep_proc.c
++++ b/kernel/locking/lockdep_proc.c
+@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
+               return 0;
+       }
+-      seq_printf(m, "%p", class->key);
++      seq_printf(m, "%pK", class->key);
+ #ifdef CONFIG_DEBUG_LOCKDEP
+       seq_printf(m, " OPS:%8ld", class->ops);
+ #endif
+@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
+       list_for_each_entry(entry, &class->locks_after, entry) {
+               if (entry->distance == 1) {
+-                      seq_printf(m, " -> [%p] ", entry->class->key);
++                      seq_printf(m, " -> [%pK] ", entry->class->key);
+                       print_name(m, entry->class);
+                       seq_puts(m, "\n");
+               }
+@@ -154,7 +154,7 @@ static int lc_show(struct seq_file *m, void *v)
+               if (!class->key)
+                       continue;
+-              seq_printf(m, "[%p] ", class->key);
++              seq_printf(m, "[%pK] ", class->key);
+               print_name(m, class);
+               seq_puts(m, "\n");
+       }
+@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+               if (!i)
+                       seq_line(m, '-', 40-namelen, namelen);
+-              snprintf(ip, sizeof(ip), "[<%p>]",
++              snprintf(ip, sizeof(ip), "[<%pK>]",
+                               (void *)class->contention_point[i]);
+               seq_printf(m, "%40s %14lu %29s %pS\n",
+                          name, stats->contention_point[i],
+@@ -525,7 +525,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+               if (!i)
+                       seq_line(m, '-', 40-namelen, namelen);
+-              snprintf(ip, sizeof(ip), "[<%p>]",
++              snprintf(ip, sizeof(ip), "[<%pK>]",
+                               (void *)class->contending_point[i]);
+               seq_printf(m, "%40s %14lu %29s %pS\n",
+                          name, stats->contending_point[i],
+diff --git a/kernel/module.c b/kernel/module.c
+index 529efae..05499fa 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -61,6 +61,7 @@
+ #include <linux/pfn.h>
+ #include <linux/bsearch.h>
+ #include <linux/dynamic_debug.h>
++#include <linux/grsecurity.h>
+ #include <uapi/linux/module.h>
+ #include "module-internal.h"
+@@ -108,16 +109,32 @@ static LIST_HEAD(modules);
+ static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
+ {
+-      struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
++      struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
++      struct module *mod = mtn->mod;
+-      return (unsigned long)layout->base;
++      if (unlikely(mtn == &mod->init_layout.mtn_rw))
++              return (unsigned long)mod->init_layout.base_rw;
++      if (unlikely(mtn == &mod->init_layout.mtn_rx))
++              return (unsigned long)mod->init_layout.base_rx;
++
++      if (unlikely(mtn == &mod->core_layout.mtn_rw))
++              return (unsigned long)mod->core_layout.base_rw;
++      return (unsigned long)mod->core_layout.base_rx;
+ }
+ static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
+ {
+-      struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
++      struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
++      struct module *mod = mtn->mod;
+-      return (unsigned long)layout->size;
++      if (unlikely(mtn == &mod->init_layout.mtn_rw))
++              return (unsigned long)mod->init_layout.size_rw;
++      if (unlikely(mtn == &mod->init_layout.mtn_rx))
++              return (unsigned long)mod->init_layout.size_rx;
++
++      if (unlikely(mtn == &mod->core_layout.mtn_rw))
++              return (unsigned long)mod->core_layout.size_rw;
++      return (unsigned long)mod->core_layout.size_rx;
+ }
+ static __always_inline bool
+@@ -150,14 +167,19 @@ static const struct latch_tree_ops mod_tree_ops = {
+ static struct mod_tree_root {
+       struct latch_tree_root root;
+-      unsigned long addr_min;
+-      unsigned long addr_max;
++      unsigned long addr_min_rw;
++      unsigned long addr_min_rx;
++      unsigned long addr_max_rw;
++      unsigned long addr_max_rx;
+ } mod_tree __cacheline_aligned = {
+-      .addr_min = -1UL,
++      .addr_min_rw = -1UL,
++      .addr_min_rx = -1UL,
+ };
+-#define module_addr_min mod_tree.addr_min
+-#define module_addr_max mod_tree.addr_max
++#define module_addr_min_rw mod_tree.addr_min_rw
++#define module_addr_min_rx mod_tree.addr_min_rx
++#define module_addr_max_rw mod_tree.addr_max_rw
++#define module_addr_max_rx mod_tree.addr_max_rx
+ static noinline void __mod_tree_insert(struct mod_tree_node *node)
+ {
+@@ -175,23 +197,31 @@ static void __mod_tree_remove(struct mod_tree_node *node)
+  */
+ static void mod_tree_insert(struct module *mod)
+ {
+-      mod->core_layout.mtn.mod = mod;
+-      mod->init_layout.mtn.mod = mod;
++      mod->core_layout.mtn_rx.mod = mod;
++      mod->core_layout.mtn_rw.mod = mod;
++      mod->init_layout.mtn_rx.mod = mod;
++      mod->init_layout.mtn_rw.mod = mod;
+-      __mod_tree_insert(&mod->core_layout.mtn);
+-      if (mod->init_layout.size)
+-              __mod_tree_insert(&mod->init_layout.mtn);
++      __mod_tree_insert(&mod->core_layout.mtn_rx);
++      __mod_tree_insert(&mod->core_layout.mtn_rw);
++      if (mod->init_layout.size_rx)
++              __mod_tree_insert(&mod->init_layout.mtn_rx);
++      if (mod->init_layout.size_rw)
++              __mod_tree_insert(&mod->init_layout.mtn_rw);
+ }
+ static void mod_tree_remove_init(struct module *mod)
+ {
+-      if (mod->init_layout.size)
+-              __mod_tree_remove(&mod->init_layout.mtn);
++      if (mod->init_layout.size_rx)
++              __mod_tree_remove(&mod->init_layout.mtn_rx);
++      if (mod->init_layout.size_rw)
++              __mod_tree_remove(&mod->init_layout.mtn_rw);
+ }
+ static void mod_tree_remove(struct module *mod)
+ {
+-      __mod_tree_remove(&mod->core_layout.mtn);
++      __mod_tree_remove(&mod->core_layout.mtn_rx);
++      __mod_tree_remove(&mod->core_layout.mtn_rw);
+       mod_tree_remove_init(mod);
+ }
+@@ -208,7 +238,8 @@ static struct module *mod_find(unsigned long addr)
+ #else /* MODULES_TREE_LOOKUP */
+-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
+ static void mod_tree_insert(struct module *mod) { }
+ static void mod_tree_remove_init(struct module *mod) { }
+@@ -232,22 +263,36 @@ static struct module *mod_find(unsigned long addr)
+  * Bounds of module text, for speeding up __module_address.
+  * Protected by module_mutex.
+  */
+-static void __mod_update_bounds(void *base, unsigned int size)
++static void __mod_update_bounds_rx(void *base, unsigned int size)
+ {
+       unsigned long min = (unsigned long)base;
+       unsigned long max = min + size;
+-      if (min < module_addr_min)
+-              module_addr_min = min;
+-      if (max > module_addr_max)
+-              module_addr_max = max;
++      if (min < module_addr_min_rx)
++              module_addr_min_rx = min;
++      if (max > module_addr_max_rx)
++              module_addr_max_rx = max;
++}
++
++static void __mod_update_bounds_rw(void *base, unsigned int size)
++{
++      unsigned long min = (unsigned long)base;
++      unsigned long max = min + size;
++
++      if (min < module_addr_min_rw)
++              module_addr_min_rw = min;
++      if (max > module_addr_max_rw)
++              module_addr_max_rw = max;
+ }
+ static void mod_update_bounds(struct module *mod)
+ {
+-      __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
+-      if (mod->init_layout.size)
+-              __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
++      __mod_update_bounds_rx(mod->core_layout.base_rx, mod->core_layout.size_rx);
++      __mod_update_bounds_rw(mod->core_layout.base_rw, mod->core_layout.size_rw);
++      if (mod->init_layout.size_rx)
++              __mod_update_bounds_rx(mod->init_layout.base_rx, mod->init_layout.size_rx);
++      if (mod->init_layout.size_rw)
++              __mod_update_bounds_rw(mod->init_layout.base_rw, mod->init_layout.size_rw);
+ }
+ #ifdef CONFIG_KGDB_KDB
+@@ -276,7 +321,7 @@ module_param(sig_enforce, bool_enable_only, 0644);
+ #endif /* !CONFIG_MODULE_SIG_FORCE */
+ /* Block module loading/unloading? */
+-int modules_disabled = 0;
++int modules_disabled __read_only = 0;
+ core_param(nomodule, modules_disabled, bint, 0);
+ /* Waiting for a module to finish initializing? */
+@@ -454,7 +499,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+               return true;
+       list_for_each_entry_rcu(mod, &modules, list) {
+-              struct symsearch arr[] = {
++              struct symsearch modarr[] = {
+                       { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+                         NOT_GPL_ONLY, false },
+                       { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+@@ -479,7 +524,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+-              if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
++              if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
+                       return true;
+       }
+       return false;
+@@ -625,7 +670,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
+       if (!pcpusec->sh_size)
+               return 0;
+-      if (align > PAGE_SIZE) {
++      if (align-1 >= PAGE_SIZE) {
+               pr_warn("%s: per-cpu alignment %li > %li\n",
+                       mod->name, align, PAGE_SIZE);
+               align = PAGE_SIZE;
+@@ -1198,7 +1243,7 @@ struct module_attribute module_uevent =
+ static ssize_t show_coresize(struct module_attribute *mattr,
+                            struct module_kobject *mk, char *buffer)
+ {
+-      return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
++      return sprintf(buffer, "%u\n", mk->mod->core_layout.size_rx + mk->mod->core_layout.size_rw);
+ }
+ static struct module_attribute modinfo_coresize =
+@@ -1207,7 +1252,7 @@ static struct module_attribute modinfo_coresize =
+ static ssize_t show_initsize(struct module_attribute *mattr,
+                            struct module_kobject *mk, char *buffer)
+ {
+-      return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
++      return sprintf(buffer, "%u\n", mk->mod->init_layout.size_rx + mk->mod->init_layout.size_rw);
+ }
+ static struct module_attribute modinfo_initsize =
+@@ -1299,12 +1344,29 @@ static int check_version(Elf_Shdr *sechdrs,
+               goto bad_version;
+       }
++#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
++      /*
++       * avoid potentially printing jibberish on attempted load
++       * of a module randomized with a different seed
++       */
++      pr_warn("no symbol version for %s\n", symname);
++#else
+       pr_warn("%s: no symbol version for %s\n", mod->name, symname);
++#endif
+       return 0;
+ bad_version:
++#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
++      /*
++       * avoid potentially printing jibberish on attempted load
++       * of a module randomized with a different seed
++       */
++      pr_warn("attempted module disagrees about version of symbol %s\n",
++             symname);
++#else
+       pr_warn("%s: disagrees about version of symbol %s\n",
+              mod->name, symname);
++#endif
+       return 0;
+ }
+@@ -1432,7 +1494,7 @@ resolve_symbol_wait(struct module *mod,
+  */
+ #ifdef CONFIG_SYSFS
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ static inline bool sect_empty(const Elf_Shdr *sect)
+ {
+       return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+@@ -1570,7 +1632,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
+ {
+       unsigned int notes, loaded, i;
+       struct module_notes_attrs *notes_attrs;
+-      struct bin_attribute *nattr;
++      bin_attribute_no_const *nattr;
+       /* failed to create section attributes, so can't create notes */
+       if (!mod->sect_attrs)
+@@ -1682,7 +1744,7 @@ static void del_usage_links(struct module *mod)
+ static int module_add_modinfo_attrs(struct module *mod)
+ {
+       struct module_attribute *attr;
+-      struct module_attribute *temp_attr;
++      module_attribute_no_const *temp_attr;
+       int error = 0;
+       int i;
+@@ -1869,40 +1931,40 @@ static void mod_sysfs_teardown(struct module *mod)
+ static void frob_text(const struct module_layout *layout,
+                     int (*set_memory)(unsigned long start, int num_pages))
+ {
+-      BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
+-      BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
+-      set_memory((unsigned long)layout->base,
+-                 layout->text_size >> PAGE_SHIFT);
++      BUG_ON((unsigned long)layout->base_rx & (PAGE_SIZE-1));
++      BUG_ON((unsigned long)layout->size_rx & (PAGE_SIZE-1));
++      set_memory((unsigned long)layout->base_rx,
++                 layout->size_rx >> PAGE_SHIFT);
+ }
+ static void frob_rodata(const struct module_layout *layout,
+                       int (*set_memory)(unsigned long start, int num_pages))
+ {
+-      BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
+-      BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
+-      BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
+-      set_memory((unsigned long)layout->base + layout->text_size,
+-                 (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
++      BUG_ON((unsigned long)layout->base_rx & (PAGE_SIZE-1));
++      BUG_ON((unsigned long)layout->size_rx & (PAGE_SIZE-1));
++//    BUG_ON((unsigned long)layout->size_ro & (PAGE_SIZE-1));
++//    set_memory((unsigned long)layout->base_rx + layout->size_rx,
++//               (layout->size_ro - layout->size_rx) >> PAGE_SHIFT);
+ }
+ static void frob_ro_after_init(const struct module_layout *layout,
+                               int (*set_memory)(unsigned long start, int num_pages))
+ {
+-      BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
++#if 0
++      BUG_ON((unsigned long)layout->base_rx & (PAGE_SIZE-1));
+       BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
+       BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
+       set_memory((unsigned long)layout->base + layout->ro_size,
+                  (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
++#endif
+ }
+ static void frob_writable_data(const struct module_layout *layout,
+                              int (*set_memory)(unsigned long start, int num_pages))
+ {
+-      BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
+-      BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
+-      BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
+-      set_memory((unsigned long)layout->base + layout->ro_after_init_size,
+-                 (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
++      BUG_ON((unsigned long)layout->base_rw & (PAGE_SIZE-1));
++      BUG_ON((unsigned long)layout->size_rw & (PAGE_SIZE-1));
++      set_memory((unsigned long)layout->base_rw, layout->size_rw  >> PAGE_SHIFT);
+ }
+ /* livepatching wants to disable read-only so it can frob module. */
+@@ -1987,7 +2049,15 @@ static void disable_ro_nx(const struct module_layout *layout)
+ }
+ #else
+-static void disable_ro_nx(const struct module_layout *layout) { }
++static void disable_ro_nx(const struct module_layout *layout)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++      set_memory_nx((unsigned long)layout->base_rx, PFN_UP(layout->size_rx));
++      set_memory_rw((unsigned long)layout->base_rx, PFN_UP(layout->size_rx));
++#endif
++
++}
+ static void module_enable_nx(const struct module *mod) { }
+ static void module_disable_nx(const struct module *mod) { }
+ #endif
+@@ -2124,16 +2194,19 @@ static void free_module(struct module *mod)
+       /* This may be empty, but that's OK */
+       disable_ro_nx(&mod->init_layout);
+       module_arch_freeing_init(mod);
+-      module_memfree(mod->init_layout.base);
++      module_memfree(mod->init_layout.base_rw);
++      module_memfree_exec(mod->init_layout.base_rx);
+       kfree(mod->args);
+       percpu_modfree(mod);
+       /* Free lock-classes; relies on the preceding sync_rcu(). */
+-      lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
++      lockdep_free_key_range(mod->core_layout.base_rw, mod->core_layout.size_rw);
++      lockdep_free_key_range(mod->core_layout.base_rx, mod->core_layout.size_rx);
+       /* Finally, free the core (containing the module structure) */
+       disable_ro_nx(&mod->core_layout);
+-      module_memfree(mod->core_layout.base);
++      module_memfree_exec(mod->core_layout.base_rx);
++      module_memfree(mod->core_layout.base_rw);
+ #ifdef CONFIG_MPU
+       update_protections(current->mm);
+@@ -2202,9 +2275,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+       int ret = 0;
+       const struct kernel_symbol *ksym;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      int is_fs_load = 0;
++      int register_filesystem_found = 0;
++      char *p;
++
++      p = strstr(mod->args, "grsec_modharden_fs");
++      if (p) {
++              char *endptr = p + sizeof("grsec_modharden_fs") - 1;
++              /* copy \0 as well */
++              memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
++              is_fs_load = 1;
++      }
++#endif
++
+       for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
+               const char *name = info->strtab + sym[i].st_name;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++              /* it's a real shame this will never get ripped and copied
++                 upstream! ;(
++              */
++              if (is_fs_load && !strcmp(name, "register_filesystem"))
++                      register_filesystem_found = 1;
++#endif
++
+               switch (sym[i].st_shndx) {
+               case SHN_COMMON:
+                       /* Ignore common symbols */
+@@ -2233,7 +2328,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+                       ksym = resolve_symbol_wait(mod, info, name);
+                       /* Ok if resolved.  */
+                       if (ksym && !IS_ERR(ksym)) {
++                              pax_open_kernel();
+                               sym[i].st_value = ksym->value;
++                              pax_close_kernel();
+                               break;
+                       }
+@@ -2252,11 +2349,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+                               secbase = (unsigned long)mod_percpu(mod);
+                       else
+                               secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
++                      pax_open_kernel();
+                       sym[i].st_value += secbase;
++                      pax_close_kernel();
+                       break;
+               }
+       }
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      if (is_fs_load && !register_filesystem_found) {
++              printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
++              ret = -EPERM;
++      }
++#endif
++
+       return ret;
+ }
+@@ -2345,26 +2451,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
+                           || s->sh_entsize != ~0UL
+                           || strstarts(sname, ".init"))
+                               continue;
+-                      s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
++                      if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++                              s->sh_entsize = get_offset(mod, &mod->core_layout.size_rw, s, i);
++                      else
++                              s->sh_entsize = get_offset(mod, &mod->core_layout.size_rx, s, i);
+                       pr_debug("\t%s\n", sname);
+               }
+-              switch (m) {
+-              case 0: /* executable */
+-                      mod->core_layout.size = debug_align(mod->core_layout.size);
+-                      mod->core_layout.text_size = mod->core_layout.size;
+-                      break;
+-              case 1: /* RO: text and ro-data */
+-                      mod->core_layout.size = debug_align(mod->core_layout.size);
+-                      mod->core_layout.ro_size = mod->core_layout.size;
+-                      break;
+-              case 2: /* RO after init */
+-                      mod->core_layout.size = debug_align(mod->core_layout.size);
+-                      mod->core_layout.ro_after_init_size = mod->core_layout.size;
+-                      break;
+-              case 4: /* whole core */
+-                      mod->core_layout.size = debug_align(mod->core_layout.size);
+-                      break;
+-              }
+       }
+       pr_debug("Init section allocation order:\n");
+@@ -2378,30 +2470,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
+                           || s->sh_entsize != ~0UL
+                           || !strstarts(sname, ".init"))
+                               continue;
+-                      s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
+-                                       | INIT_OFFSET_MASK);
++                      if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++                              s->sh_entsize = get_offset(mod, &mod->init_layout.size_rw, s, i);
++                      else
++                              s->sh_entsize = get_offset(mod, &mod->init_layout.size_rx, s, i);
++                      s->sh_entsize |= INIT_OFFSET_MASK;
+                       pr_debug("\t%s\n", sname);
+               }
+-              switch (m) {
+-              case 0: /* executable */
+-                      mod->init_layout.size = debug_align(mod->init_layout.size);
+-                      mod->init_layout.text_size = mod->init_layout.size;
+-                      break;
+-              case 1: /* RO: text and ro-data */
+-                      mod->init_layout.size = debug_align(mod->init_layout.size);
+-                      mod->init_layout.ro_size = mod->init_layout.size;
+-                      break;
+-              case 2:
+-                      /*
+-                       * RO after init doesn't apply to init_layout (only
+-                       * core_layout), so it just takes the value of ro_size.
+-                       */
+-                      mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
+-                      break;
+-              case 4: /* whole init */
+-                      mod->init_layout.size = debug_align(mod->init_layout.size);
+-                      break;
+-              }
+       }
+ }
+@@ -2579,7 +2654,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+       /* Put symbol section at end of init part of module. */
+       symsect->sh_flags |= SHF_ALLOC;
+-      symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
++      symsect->sh_entsize = get_offset(mod, &mod->init_layout.size_rx, symsect,
+                                        info->index.sym) | INIT_OFFSET_MASK;
+       pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
+@@ -2597,23 +2672,23 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+       }
+       /* Append room for core symbols at end of core part. */
+-      info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
+-      info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
+-      mod->core_layout.size += strtab_size;
+-      mod->core_layout.size = debug_align(mod->core_layout.size);
++      info->symoffs = ALIGN(mod->core_layout.size_rx, symsect->sh_addralign ?: 1);
++      info->stroffs = mod->core_layout.size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
++      mod->core_layout.size_rx += strtab_size;
++      mod->core_layout.size_rx = debug_align(mod->core_layout.size_rx);
+       /* Put string table section at end of init part of module. */
+       strsect->sh_flags |= SHF_ALLOC;
+-      strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
++      strsect->sh_entsize = get_offset(mod, &mod->init_layout.size_rx, strsect,
+                                        info->index.str) | INIT_OFFSET_MASK;
+       pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+       /* We'll tack temporary mod_kallsyms on the end. */
+-      mod->init_layout.size = ALIGN(mod->init_layout.size,
++      mod->init_layout.size_rx = ALIGN(mod->init_layout.size_rx,
+                                     __alignof__(struct mod_kallsyms));
+-      info->mod_kallsyms_init_off = mod->init_layout.size;
+-      mod->init_layout.size += sizeof(struct mod_kallsyms);
+-      mod->init_layout.size = debug_align(mod->init_layout.size);
++      info->mod_kallsyms_init_off = mod->init_layout.size_rx;
++      mod->init_layout.size_rx += sizeof(struct mod_kallsyms);
++      mod->init_layout.size_rx = debug_align(mod->init_layout.size_rx);
+ }
+ /*
+@@ -2630,7 +2705,9 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+       Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
+       /* Set up to point into init section. */
+-      mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
++      mod->kallsyms = mod->init_layout.base_rx + info->mod_kallsyms_init_off;
++
++      pax_open_kernel();
+       mod->kallsyms->symtab = (void *)symsec->sh_addr;
+       mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+@@ -2643,8 +2720,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+                       = elf_type(&mod->kallsyms->symtab[i], info);
+       /* Now populate the cut down core kallsyms for after init. */
+-      mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
+-      mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
++      mod->core_kallsyms.symtab = dst = mod->core_layout.base_rx + info->symoffs;
++      mod->core_kallsyms.strtab = s = mod->core_layout.base_rx + info->stroffs;
+       src = mod->kallsyms->symtab;
+       for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
+               if (i == 0 || is_livepatch_module(mod) ||
+@@ -2657,6 +2734,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+               }
+       }
+       mod->core_kallsyms.num_symtab = ndst;
++
++      pax_close_kernel();
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2924,7 +3003,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+       mod = (void *)info->sechdrs[info->index.mod].sh_addr;
+       if (info->index.sym == 0) {
++#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
++              /*
++               * avoid potentially printing jibberish on attempted load
++               * of a module randomized with a different seed
++               */
++              pr_warn("module has no symbols (stripped?)\n");
++#else
+               pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
++#endif
+               return ERR_PTR(-ENOEXEC);
+       }
+@@ -2940,8 +3027,16 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+ {
+       const char *modmagic = get_modinfo(info, "vermagic");
++      const char *license = get_modinfo(info, "license");
+       int err;
++#if defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) || defined(CONFIG_PAX_RAP)
++      if (!license || !license_is_gpl_compatible(license)) {
++              pr_err("%s: module is not compatible with the KERNEXEC 'or' method and RAP\n", mod->name);
++              return -ENOEXEC;
++      }
++#endif
++
+       if (flags & MODULE_INIT_IGNORE_VERMAGIC)
+               modmagic = NULL;
+@@ -2974,7 +3069,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+               return err;
+       /* Set up license info based on the info section */
+-      set_license(mod, get_modinfo(info, "license"));
++      set_license(mod, license);
+       return 0;
+ }
+@@ -3071,7 +3166,7 @@ static int move_module(struct module *mod, struct load_info *info)
+       void *ptr;
+       /* Do the allocs. */
+-      ptr = module_alloc(mod->core_layout.size);
++      ptr = module_alloc(mod->core_layout.size_rw);
+       /*
+        * The pointer to this block is stored in the module structure
+        * which is inside the block. Just mark it as not being a
+@@ -3081,11 +3176,11 @@ static int move_module(struct module *mod, struct load_info *info)
+       if (!ptr)
+               return -ENOMEM;
+-      memset(ptr, 0, mod->core_layout.size);
+-      mod->core_layout.base = ptr;
++      memset(ptr, 0, mod->core_layout.size_rw);
++      mod->core_layout.base_rw = ptr;
+-      if (mod->init_layout.size) {
+-              ptr = module_alloc(mod->init_layout.size);
++      if (mod->init_layout.size_rw) {
++              ptr = module_alloc(mod->init_layout.size_rw);
+               /*
+                * The pointer to this block is stored in the module structure
+                * which is inside the block. This block doesn't need to be
+@@ -3094,13 +3189,45 @@ static int move_module(struct module *mod, struct load_info *info)
+                */
+               kmemleak_ignore(ptr);
+               if (!ptr) {
+-                      module_memfree(mod->core_layout.base);
++                      module_memfree(mod->core_layout.base_rw);
+                       return -ENOMEM;
+               }
+-              memset(ptr, 0, mod->init_layout.size);
+-              mod->init_layout.base = ptr;
++              memset(ptr, 0, mod->init_layout.size_rw);
++              mod->init_layout.base_rw = ptr;
+       } else
+-              mod->init_layout.base = NULL;
++              mod->init_layout.base_rw = NULL;
++
++      ptr = module_alloc_exec(mod->core_layout.size_rx);
++      kmemleak_not_leak(ptr);
++      if (!ptr) {
++              if (mod->init_layout.base_rw)
++                      module_memfree(mod->init_layout.base_rw);
++              module_memfree(mod->core_layout.base_rw);
++              return -ENOMEM;
++      }
++
++      pax_open_kernel();
++      memset(ptr, 0, mod->core_layout.size_rx);
++      pax_close_kernel();
++      mod->core_layout.base_rx = ptr;
++
++      if (mod->init_layout.size_rx) {
++              ptr = module_alloc_exec(mod->init_layout.size_rx);
++              kmemleak_ignore(ptr);
++              if (!ptr) {
++                      module_memfree(mod->core_layout.base_rx);
++                      if (mod->init_layout.base_rw)
++                              module_memfree(mod->init_layout.base_rw);
++                      module_memfree(mod->core_layout.base_rw);
++                      return -ENOMEM;
++              }
++
++              pax_open_kernel();
++              memset(ptr, 0, mod->init_layout.size_rx);
++              pax_close_kernel();
++              mod->init_layout.base_rx = ptr;
++      } else
++              mod->init_layout.base_rx = NULL;
+       /* Transfer each section which specifies SHF_ALLOC */
+       pr_debug("final section addresses:\n");
+@@ -3111,16 +3238,45 @@ static int move_module(struct module *mod, struct load_info *info)
+               if (!(shdr->sh_flags & SHF_ALLOC))
+                       continue;
+-              if (shdr->sh_entsize & INIT_OFFSET_MASK)
+-                      dest = mod->init_layout.base
+-                              + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+-              else
+-                      dest = mod->core_layout.base + shdr->sh_entsize;
++              if (shdr->sh_entsize & INIT_OFFSET_MASK) {
++                      if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++                              dest = mod->init_layout.base_rw
++                                      + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++                      else
++                              dest = mod->init_layout.base_rx
++                                      + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++              } else {
++                      if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++                              dest = mod->core_layout.base_rw + shdr->sh_entsize;
++                      else
++                              dest = mod->core_layout.base_rx + shdr->sh_entsize;
++              }
++
++              if (shdr->sh_type != SHT_NOBITS) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_64
++                      if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
++                              set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
++#endif
++                      if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
++                              pax_open_kernel();
++                              memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++                              pax_close_kernel();
++                      } else
++#endif
+-              if (shdr->sh_type != SHT_NOBITS)
+                       memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++              }
+               /* Update sh_addr to point to copy in image. */
+-              shdr->sh_addr = (unsigned long)dest;
++
++#ifdef CONFIG_PAX_KERNEXEC
++              if (shdr->sh_flags & SHF_EXECINSTR)
++                      shdr->sh_addr = ktva_ktla((unsigned long)dest);
++              else
++#endif
++
++                      shdr->sh_addr = (unsigned long)dest;
+               pr_debug("\t0x%lx %s\n",
+                        (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
+       }
+@@ -3182,12 +3338,12 @@ static void flush_module_icache(const struct module *mod)
+        * Do it before processing of module parameters, so the module
+        * can provide parameter accessor functions of its own.
+        */
+-      if (mod->init_layout.base)
+-              flush_icache_range((unsigned long)mod->init_layout.base,
+-                                 (unsigned long)mod->init_layout.base
+-                                 + mod->init_layout.size);
+-      flush_icache_range((unsigned long)mod->core_layout.base,
+-                         (unsigned long)mod->core_layout.base + mod->core_layout.size);
++      if (mod->init_layout.base_rx)
++              flush_icache_range((unsigned long)mod->init_layout.base_rx,
++                                 (unsigned long)mod->init_layout.base_rx
++                                 + mod->init_layout.size_rx);
++      flush_icache_range((unsigned long)mod->core_layout.base_rx,
++                         (unsigned long)mod->core_layout.base_rx + mod->core_layout.size_rx);
+       set_fs(old_fs);
+ }
+@@ -3279,8 +3435,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
+ {
+       percpu_modfree(mod);
+       module_arch_freeing_init(mod);
+-      module_memfree(mod->init_layout.base);
+-      module_memfree(mod->core_layout.base);
++      module_memfree_exec(mod->init_layout.base_rx);
++      module_memfree_exec(mod->core_layout.base_rx);
++      module_memfree(mod->init_layout.base_rw);
++      module_memfree(mod->core_layout.base_rw);
+ }
+ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -3293,7 +3451,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+ static int post_relocation(struct module *mod, const struct load_info *info)
+ {
+       /* Sort exception table now relocations are done. */
++      pax_open_kernel();
+       sort_extable(mod->extable, mod->extable + mod->num_exentries);
++      pax_close_kernel();
+       /* Copy relocated percpu area over. */
+       percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
+@@ -3341,13 +3501,15 @@ static void do_mod_ctors(struct module *mod)
+ /* For freeing module_init on success, in case kallsyms traversing */
+ struct mod_initfree {
+       struct rcu_head rcu;
+-      void *module_init;
++      void *module_init_rw;
++      void *module_init_rx;
+ };
+ static void do_free_init(struct rcu_head *head)
+ {
+       struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+-      module_memfree(m->module_init);
++      module_memfree(m->module_init_rw);
++      module_memfree_exec(m->module_init_rx);
+       kfree(m);
+ }
+@@ -3367,7 +3529,8 @@ static noinline int do_init_module(struct module *mod)
+               ret = -ENOMEM;
+               goto fail;
+       }
+-      freeinit->module_init = mod->init_layout.base;
++      freeinit->module_init_rx = mod->init_layout.base_rx;
++      freeinit->module_init_rw = mod->init_layout.base_rw;
+       /*
+        * We want to find out whether @mod uses async during init.  Clear
+@@ -3427,11 +3590,10 @@ static noinline int do_init_module(struct module *mod)
+       mod_tree_remove_init(mod);
+       disable_ro_nx(&mod->init_layout);
+       module_arch_freeing_init(mod);
+-      mod->init_layout.base = NULL;
+-      mod->init_layout.size = 0;
+-      mod->init_layout.ro_size = 0;
+-      mod->init_layout.ro_after_init_size = 0;
+-      mod->init_layout.text_size = 0;
++      mod->init_layout.base_rx = NULL;
++      mod->init_layout.base_rw = NULL;
++      mod->init_layout.size_rx = 0;
++      mod->init_layout.size_rw = 0;
+       /*
+        * We want to free module_init, but be aware that kallsyms may be
+        * walking this with preempt disabled.  In all the failure paths, we
+@@ -3630,9 +3792,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
+       if (err)
+               goto free_unload;
++      /* Now copy in args */
++      mod->args = strndup_user(uargs, ~0UL >> 1);
++      if (IS_ERR(mod->args)) {
++              err = PTR_ERR(mod->args);
++              goto free_unload;
++      }
++
+       /* Set up MODINFO_ATTR fields */
+       setup_modinfo(mod, info);
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++      {
++              char *p, *p2;
++
++              if (strstr(mod->args, "grsec_modharden_netdev")) {
++                      printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
++                      err = -EPERM;
++                      goto free_modinfo;
++              } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
++                      p += sizeof("grsec_modharden_normal") - 1;
++                      p2 = strstr(p, "_");
++                      if (p2) {
++                              *p2 = '\0';
++                              printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
++                              *p2 = '_';
++                      }
++                      err = -EPERM;
++                      goto free_modinfo;
++              }
++      }
++#endif
++
+       /* Fix up syms, so that st_value is a pointer to location. */
+       err = simplify_symbols(mod, info);
+       if (err < 0)
+@@ -3648,13 +3839,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+       flush_module_icache(mod);
+-      /* Now copy in args */
+-      mod->args = strndup_user(uargs, ~0UL >> 1);
+-      if (IS_ERR(mod->args)) {
+-              err = PTR_ERR(mod->args);
+-              goto free_arch_cleanup;
+-      }
+-
+       dynamic_debug_setup(info->debug, info->num_debug);
+       /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+@@ -3719,11 +3903,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+  ddebug_cleanup:
+       dynamic_debug_remove(info->debug);
+       synchronize_sched();
+-      kfree(mod->args);
+- free_arch_cleanup:
+       module_arch_cleanup(mod);
+  free_modinfo:
+       free_modinfo(mod);
++      kfree(mod->args);
+  free_unload:
+       module_unload_free(mod);
+  unlink_mod:
+@@ -3743,7 +3926,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
+        */
+       ftrace_release_mod(mod);
+       /* Free lock-classes; relies on the preceding sync_rcu() */
+-      lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
++      lockdep_free_key_range(mod->core_layout.base_rw, mod->core_layout.size_rw);
++      lockdep_free_key_range(mod->core_layout.base_rx, mod->core_layout.size_rx);
+       module_deallocate(mod, info);
+  free_copy:
+@@ -3831,10 +4015,16 @@ static const char *get_ksymbol(struct module *mod,
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+       /* At worse, next value is at end of module */
+-      if (within_module_init(addr, mod))
+-              nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
++      if (within_module_rx(addr, &mod->init_layout))
++              nextval = (unsigned long)mod->init_layout.base_rx+mod->init_layout.size_rx;
++      else if (within_module_rw(addr, &mod->init_layout))
++              nextval = (unsigned long)mod->init_layout.base_rw+mod->init_layout.size_rw;
++      else if (within_module_rx(addr, &mod->core_layout))
++              nextval = (unsigned long)mod->core_layout.base_rx+mod->core_layout.size_rx;
++      else if (within_module_rw(addr, &mod->core_layout))
++              nextval = (unsigned long)mod->core_layout.base_rw+mod->core_layout.size_rw;
+       else
+-              nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
++              return NULL;
+       /* Scan for closest preceding symbol, and next symbol. (ELF
+          starts real symbols at 1). */
+@@ -4087,7 +4277,7 @@ static int m_show(struct seq_file *m, void *p)
+               return 0;
+       seq_printf(m, "%s %u",
+-                 mod->name, mod->init_layout.size + mod->core_layout.size);
++                 mod->name, mod->init_layout.size_rx + mod->init_layout.size_rw + mod->core_layout.size_rx + mod->core_layout.size_rw);
+       print_unload_info(m, mod);
+       /* Informative for users. */
+@@ -4096,7 +4286,7 @@ static int m_show(struct seq_file *m, void *p)
+                  mod->state == MODULE_STATE_COMING ? "Loading" :
+                  "Live");
+       /* Used by oprofile and other similar tools. */
+-      seq_printf(m, " 0x%pK", mod->core_layout.base);
++      seq_printf(m, " 0x%pK 0x%pK", mod->core_layout.base_rx, mod->core_layout.base_rw);
+       /* Taints info */
+       if (mod->taints)
+@@ -4132,7 +4322,17 @@ static const struct file_operations proc_modules_operations = {
+ static int __init proc_modules_init(void)
+ {
++#ifndef CONFIG_GRKERNSEC_HIDESYM
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
++#else
+       proc_create("modules", 0, NULL, &proc_modules_operations);
++#endif
++#else
++      proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#endif
+       return 0;
+ }
+ module_init(proc_modules_init);
+@@ -4193,7 +4393,15 @@ struct module *__module_address(unsigned long addr)
+ {
+       struct module *mod;
+-      if (addr < module_addr_min || addr > module_addr_max)
++#ifdef CONFIG_X86_32
++      unsigned long vaddr = ktla_ktva(addr);
++
++      if (module_addr_min_rx <= vaddr && vaddr <= module_addr_max_rx)
++              addr = vaddr;
++#endif
++
++      if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
++          (addr < module_addr_min_rw || addr > module_addr_max_rw))
+               return NULL;
+       module_assert_mutex_or_preempt();
+@@ -4236,11 +4444,21 @@ bool is_module_text_address(unsigned long addr)
+  */
+ struct module *__module_text_address(unsigned long addr)
+ {
+-      struct module *mod = __module_address(addr);
++      struct module *mod;
++
++#ifdef CONFIG_X86_32
++      addr = ktla_ktva(addr);
++#endif
++
++      if (addr < module_addr_min_rx || addr > module_addr_max_rx)
++              return NULL;
++
++      mod = __module_address(addr);
++
+       if (mod) {
+               /* Make sure it's within the text section. */
+-              if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
+-                  && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
++              if (!within_module_rx(addr, &mod->init_layout)
++                  && !within_module_rx(addr, &mod->core_layout))
+                       mod = NULL;
+       }
+       return mod;
+@@ -4270,7 +4488,7 @@ void print_modules(void)
+ #ifdef CONFIG_MODVERSIONS
+ /* Generate the signature for all relevant module structures here.
+  * If these change, we don't want to try to parse the module. */
+-void module_layout(struct module *mod,
++__visible void module_layout(struct module *mod,
+                  struct modversion_info *ver,
+                  struct kernel_param *kp,
+                  struct kernel_symbol *ks,
+diff --git a/kernel/notifier.c b/kernel/notifier.c
+index fd2c9ac..6263e05 100644
+--- a/kernel/notifier.c
++++ b/kernel/notifier.c
+@@ -5,6 +5,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/vmalloc.h>
+ #include <linux/reboot.h>
++#include <linux/mm.h>
+ /*
+  *    Notifier list for kernel code which wants to be called
+@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
+       while ((*nl) != NULL) {
+               if (n->priority > (*nl)->priority)
+                       break;
+-              nl = &((*nl)->next);
++              nl = (struct notifier_block **)&((*nl)->next);
+       }
+-      n->next = *nl;
++      pax_open_kernel();
++      const_cast(n->next) = *nl;
+       rcu_assign_pointer(*nl, n);
++      pax_close_kernel();
+       return 0;
+ }
+@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
+                       return 0;
+               if (n->priority > (*nl)->priority)
+                       break;
+-              nl = &((*nl)->next);
++              nl = (struct notifier_block **)&((*nl)->next);
+       }
+-      n->next = *nl;
++      pax_open_kernel();
++      const_cast(n->next) = *nl;
+       rcu_assign_pointer(*nl, n);
++      pax_close_kernel();
+       return 0;
+ }
+@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
+ {
+       while ((*nl) != NULL) {
+               if ((*nl) == n) {
++                      pax_open_kernel();
+                       rcu_assign_pointer(*nl, n->next);
++                      pax_close_kernel();
+                       return 0;
+               }
+-              nl = &((*nl)->next);
++              nl = (struct notifier_block **)&((*nl)->next);
+       }
+       return -ENOENT;
+ }
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 9932788..7052e20 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
+        * seq_nr mod. number of cpus in use.
+        */
+-      seq_nr = atomic_inc_return(&pd->seq_nr);
++      seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
+       cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
+       return padata_index_to_cpu(pd, cpu_index);
+@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+       padata_init_pqueues(pd);
+       padata_init_squeues(pd);
+       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+-      atomic_set(&pd->seq_nr, -1);
++      atomic_set_unchecked(&pd->seq_nr, -1);
+       atomic_set(&pd->reorder_objects, 0);
+       atomic_set(&pd->refcnt, 0);
+       pd->pinst = pinst;
+diff --git a/kernel/panic.c b/kernel/panic.c
+index ca8cea1..2de8171 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -56,7 +56,7 @@ EXPORT_SYMBOL(panic_blink);
+ /*
+  * Stop ourself in panic -- architecture code may override this
+  */
+-void __weak panic_smp_self_stop(void)
++void __weak __noreturn panic_smp_self_stop(void)
+ {
+       while (1)
+               cpu_relax();
+@@ -488,11 +488,11 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+       pr_warn("------------[ cut here ]------------\n");
+       if (file)
+-              pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
++              pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA\n",
+                       raw_smp_processor_id(), current->pid, file, line,
+                       caller);
+       else
+-              pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
++              pr_warn("WARNING: CPU: %d PID: %d at %pA\n",
+                       raw_smp_processor_id(), current->pid, caller);
+       if (args)
+@@ -523,7 +523,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+ }
+ #ifdef WANT_WARN_ON_SLOWPATH
+-void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
++void warn_slowpath_fmt(const char *file, const int line, const char *fmt, ...)
+ {
+       struct warn_args args;
+@@ -535,7 +535,7 @@ void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+ }
+ EXPORT_SYMBOL(warn_slowpath_fmt);
+-void warn_slowpath_fmt_taint(const char *file, int line,
++void warn_slowpath_fmt_taint(const char *file, const int line,
+                            unsigned taint, const char *fmt, ...)
+ {
+       struct warn_args args;
+@@ -547,7 +547,7 @@ void warn_slowpath_fmt_taint(const char *file, int line,
+ }
+ EXPORT_SYMBOL(warn_slowpath_fmt_taint);
+-void warn_slowpath_null(const char *file, int line)
++void warn_slowpath_null(const char *file, const int line)
+ {
+       __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
+ }
+@@ -562,7 +562,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
+  */
+ __visible void __stack_chk_fail(void)
+ {
+-      panic("stack-protector: Kernel stack is corrupted in: %p\n",
++      dump_stack();
++      panic("stack-protector: Kernel stack is corrupted in: %pA\n",
+               __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff --git a/kernel/pid.c b/kernel/pid.c
+index f66162f..e950a59 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -33,6 +33,7 @@
+ #include <linux/rculist.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/security.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
+@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
+ int pid_max = PID_MAX_DEFAULT;
+-#define RESERVED_PIDS         300
++#define RESERVED_PIDS         500
+ int pid_max_min = RESERVED_PIDS + 1;
+ int pid_max_max = PID_MAX_LIMIT;
+@@ -451,9 +452,17 @@ EXPORT_SYMBOL(pid_task);
+  */
+ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+ {
++      struct task_struct *task;
++
+       RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+                        "find_task_by_pid_ns() needs rcu_read_lock() protection");
+-      return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++      task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++      if (gr_pid_is_chrooted(task))
++              return NULL;
++
++      return task;
+ }
+ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -461,6 +470,13 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
+       return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
+ }
++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
++{
++      RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
++                       "find_task_by_pid_ns() needs rcu_read_lock() protection");
++      return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
++}
++
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
+       struct pid *pid;
+@@ -497,9 +513,9 @@ struct pid *find_get_pid(pid_t nr)
+ }
+ EXPORT_SYMBOL_GPL(find_get_pid);
+-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
++pid_t pid_nr_ns(const struct pid *pid, const struct pid_namespace *ns)
+ {
+-      struct upid *upid;
++      const struct upid *upid;
+       pid_t nr = 0;
+       if (pid && ns->level <= pid->level) {
+@@ -511,7 +527,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+ }
+ EXPORT_SYMBOL_GPL(pid_nr_ns);
+-pid_t pid_vnr(struct pid *pid)
++pid_t pid_vnr(const struct pid *pid)
+ {
+       return pid_nr_ns(pid, task_active_pid_ns(current));
+ }
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index a65ba13..f600dbb 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+       struct pid_namespace *pid_ns = task_active_pid_ns(current);
+-      struct ctl_table tmp = *table;
++      ctl_table_no_const tmp = *table;
+       if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
+               return -EPERM;
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index 68d3ebc..554935d 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -34,6 +34,7 @@ config HIBERNATE_CALLBACKS
+ config HIBERNATION
+       bool "Hibernation (aka 'suspend to disk')"
+       depends on SWAP && ARCH_HIBERNATION_POSSIBLE
++      depends on !GRKERNSEC_KMEM
+       select HIBERNATE_CALLBACKS
+       select LZO_COMPRESS
+       select LZO_DECOMPRESS
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 33c79b6..b26dbc4 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -306,8 +306,10 @@ static int create_image(int platform_mode)
+       if (error)
+               printk(KERN_ERR "PM: Error %d creating hibernation image\n",
+                       error);
+-      if (!in_suspend)
++      if (!in_suspend) {
+               events_check_enabled = false;
++              clear_free_pages();
++      }
+       platform_leave(platform_mode);
+@@ -1189,22 +1191,6 @@ static int __init nohibernate_setup(char *str)
+       return 1;
+ }
+-static int __init page_poison_nohibernate_setup(char *str)
+-{
+-#ifdef CONFIG_PAGE_POISONING_ZERO
+-      /*
+-       * The zeroing option for page poison skips the checks on alloc.
+-       * since hibernation doesn't save free pages there's no way to
+-       * guarantee the pages will still be zeroed.
+-       */
+-      if (!strcmp(str, "on")) {
+-              pr_info("Disabling hibernation due to page poisoning\n");
+-              return nohibernate_setup(str);
+-      }
+-#endif
+-      return 1;
+-}
+-
+ __setup("noresume", noresume_setup);
+ __setup("resume_offset=", resume_offset_setup);
+ __setup("resume=", resume_setup);
+@@ -1212,4 +1198,3 @@ __setup("hibernate=", hibernate_setup);
+ __setup("resumewait", resumewait_setup);
+ __setup("resumedelay=", resumedelay_setup);
+ __setup("nohibernate", nohibernate_setup);
+-__setup("page_poison=", page_poison_nohibernate_setup);
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 242d8b8..56d1d0d 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -110,6 +110,8 @@ extern int create_basic_memory_bitmaps(void);
+ extern void free_basic_memory_bitmaps(void);
+ extern int hibernate_preallocate_memory(void);
++extern void clear_free_pages(void);
++
+ /**
+  *    Auxiliary structure used for reading the snapshot image data and
+  *    metadata from and writing them to the list of page backup entries
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 8f27d5a..e7389a0 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
+       unsigned int elapsed_msecs;
+       bool wakeup = false;
+       int sleep_usecs = USEC_PER_MSEC;
++      bool timedout = false;
+       start = ktime_get_boottime();
+@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
+       while (true) {
+               todo = 0;
++              if (time_after(jiffies, end_time))
++                      timedout = true;
+               read_lock(&tasklist_lock);
+               for_each_process_thread(g, p) {
+                       if (p == current || !freeze_task(p))
+                               continue;
+-                      if (!freezer_should_skip(p))
++                      if (!freezer_should_skip(p)) {
+                               todo++;
++                              if (timedout) {
++                                      printk(KERN_ERR "Task refusing to freeze:\n");
++                                      sched_show_task(p);
++                              }
++                      }
+               }
+               read_unlock(&tasklist_lock);
+@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
+                       todo += wq_busy;
+               }
+-              if (!todo || time_after(jiffies, end_time))
++              if (!todo || timedout)
+                       break;
+               if (pm_wakeup_pending()) {
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index b022284..b48c449 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1020,6 +1020,28 @@ static void swsusp_unset_page_forbidden(struct page *page)
+               memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
+ }
++void clear_free_pages(void)
++{
++#if defined(CONFIG_PAX_MEMORY_SANITIZE) || defined(CONFIG_PAGE_POISONING_ZERO)
++      struct memory_bitmap *bm = free_pages_map;
++      unsigned long pfn;
++
++      if (WARN_ON(!(free_pages_map)))
++              return;
++
++      memory_bm_position_reset(bm);
++      pfn = memory_bm_next_pfn(bm);
++      while (pfn != BM_END_OF_MAP) {
++              if (pfn_valid(pfn))
++                      clear_highpage(pfn_to_page(pfn));
++
++              pfn = memory_bm_next_pfn(bm);
++      }
++      memory_bm_position_reset(bm);
++      pr_info("PM: free pages cleared after restore\n");
++#endif /* CONFIG_PAX_MEMORY_SANITIZE || PAGE_POISONING_ZERO */
++}
++
+ /**
+  * mark_nosave_pages - Mark pages that should not be saved.
+  * @bm: Memory bitmap.
+@@ -1132,6 +1154,26 @@ void free_basic_memory_bitmaps(void)
+       pr_debug("PM: Basic memory bitmaps freed\n");
+ }
++void clear_free_pages(void)
++{
++      struct memory_bitmap *bm = free_pages_map;
++      unsigned long pfn;
++
++      if (WARN_ON(!(free_pages_map)))
++              return;
++
++      memory_bm_position_reset(bm);
++      pfn = memory_bm_next_pfn(bm);
++      while (pfn != BM_END_OF_MAP) {
++              if (pfn_valid(pfn))
++                      clear_highpage(pfn_to_page(pfn));
++
++              pfn = memory_bm_next_pfn(bm);
++      }
++      memory_bm_position_reset(bm);
++      pr_info("PM: free pages cleared after restore\n");
++}
++
+ /**
+  * snapshot_additional_pages - Estimate the number of extra pages needed.
+  * @zone: Memory zone to carry out the computation for.
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index eea6dbc..075ab5e 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -588,7 +588,7 @@ static int log_store(int facility, int level,
+       return msg->text_len;
+ }
+-int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
++int dmesg_restrict __read_only = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
+ static int syslog_action_restricted(int type)
+ {
+@@ -611,6 +611,11 @@ int check_syslog_permissions(int type, int source)
+       if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
+               goto ok;
++#ifdef CONFIG_GRKERNSEC_DMESG
++      if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
++              return -EPERM;
++#endif
++
+       if (syslog_action_restricted(type)) {
+               if (capable(CAP_SYSLOG))
+                       goto ok;
+diff --git a/kernel/profile.c b/kernel/profile.c
+index 2dbccf2..f98676c 100644
+--- a/kernel/profile.c
++++ b/kernel/profile.c
+@@ -37,7 +37,7 @@ struct profile_hit {
+ #define NR_PROFILE_HIT                (PAGE_SIZE/sizeof(struct profile_hit))
+ #define NR_PROFILE_GRP                (NR_PROFILE_HIT/PROFILE_GRPSZ)
+-static atomic_t *prof_buffer;
++static atomic_unchecked_t *prof_buffer;
+ static unsigned long prof_len, prof_shift;
+ int prof_on __read_mostly;
+@@ -257,7 +257,7 @@ static void profile_flip_buffers(void)
+                                       hits[i].pc = 0;
+                               continue;
+                       }
+-                      atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++                      atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+                       hits[i].hits = hits[i].pc = 0;
+               }
+       }
+@@ -318,9 +318,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
+        * Add the current hit(s) and flush the write-queue out
+        * to the global buffer:
+        */
+-      atomic_add(nr_hits, &prof_buffer[pc]);
++      atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
+       for (i = 0; i < NR_PROFILE_HIT; ++i) {
+-              atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++              atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+               hits[i].pc = hits[i].hits = 0;
+       }
+ out:
+@@ -384,7 +384,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
+ {
+       unsigned long pc;
+       pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
+-      atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
++      atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+ }
+ #endif /* !CONFIG_SMP */
+@@ -479,7 +479,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+                       return -EFAULT;
+               buf++; p++; count--; read++;
+       }
+-      pnt = (char *)prof_buffer + p - sizeof(atomic_t);
++      pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
+       if (copy_to_user(buf, (void *)pnt, count))
+               return -EFAULT;
+       read += count;
+@@ -510,7 +510,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
+       }
+ #endif
+       profile_discard_flip_buffers();
+-      memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
++      memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
+       return count;
+ }
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 1d3b766..4fc197c 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -206,12 +206,32 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+       return ret;
+ }
+-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
++static bool ptrace_has_cap(const struct cred *tcred, unsigned int mode)
+ {
++      struct user_namespace *tns = tcred->user_ns;
++      struct user_namespace *curns = current_cred()->user_ns;
++
++      /* When a root-owned process enters a user namespace created by a
++       * malicious user, the user shouldn't be able to execute code under
++       * uid 0 by attaching to the root-owned process via ptrace.
++       * Therefore, similar to the capable_wrt_inode_uidgid() check,
++       * verify that all the uids and gids of the target process are
++       * mapped into the current namespace.
++       * No fsuid/fsgid check because __ptrace_may_access doesn't do it
++       * either.
++       */
++      if (!kuid_has_mapping(curns, tcred->euid) ||
++                      !kuid_has_mapping(curns, tcred->suid) ||
++                      !kuid_has_mapping(curns, tcred->uid)  ||
++                      !kgid_has_mapping(curns, tcred->egid) ||
++                      !kgid_has_mapping(curns, tcred->sgid) ||
++                      !kgid_has_mapping(curns, tcred->gid))
++              return false;
++
+       if (mode & PTRACE_MODE_NOAUDIT)
+-              return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
++              return has_ns_capability_noaudit(current, tns, CAP_SYS_PTRACE);
+       else
+-              return has_ns_capability(current, ns, CAP_SYS_PTRACE);
++              return has_ns_capability(current, tns, CAP_SYS_PTRACE);
+ }
+ /* Returns 0 on success, -errno on denial. */
+@@ -263,7 +283,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+           gid_eq(caller_gid, tcred->sgid) &&
+           gid_eq(caller_gid, tcred->gid))
+               goto ok;
+-      if (ptrace_has_cap(tcred->user_ns, mode))
++      if (ptrace_has_cap(tcred, mode))
+               goto ok;
+       rcu_read_unlock();
+       return -EPERM;
+@@ -274,7 +294,7 @@ ok:
+               dumpable = get_dumpable(task->mm);
+       rcu_read_lock();
+       if (dumpable != SUID_DUMP_USER &&
+-          !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
++          !ptrace_has_cap(__task_cred(task), mode)) {
+               rcu_read_unlock();
+               return -EPERM;
+       }
+@@ -343,7 +363,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+       if (seize)
+               flags |= PT_SEIZED;
+       rcu_read_lock();
+-      if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
++      if (ns_capable_noaudit(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
+               flags |= PT_PTRACE_CAP;
+       rcu_read_unlock();
+       task->ptrace = flags;
+@@ -542,7 +562,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+                               break;
+                       return -EIO;
+               }
+-              if (copy_to_user(dst, buf, retval))
++              if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
+                       return -EFAULT;
+               copied += retval;
+               src += retval;
+@@ -843,7 +863,7 @@ int ptrace_request(struct task_struct *child, long request,
+       bool seized = child->ptrace & PT_SEIZED;
+       int ret = -EIO;
+       siginfo_t siginfo, *si;
+-      void __user *datavp = (void __user *) data;
++      void __user *datavp = (__force void __user *) data;
+       unsigned long __user *datalp = datavp;
+       unsigned long flags;
+@@ -1094,14 +1114,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+               goto out;
+       }
++      if (gr_handle_ptrace(child, request)) {
++              ret = -EPERM;
++              goto out_put_task_struct;
++      }
++
+       if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+               ret = ptrace_attach(child, request, addr, data);
+               /*
+                * Some architectures need to do book-keeping after
+                * a ptrace attach.
+                */
+-              if (!ret)
++              if (!ret) {
+                       arch_ptrace_attach(child);
++                      gr_audit_ptrace(child);
++              }
+               goto out_put_task_struct;
+       }
+@@ -1129,7 +1156,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+       copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+       if (copied != sizeof(tmp))
+               return -EIO;
+-      return put_user(tmp, (unsigned long __user *)data);
++      return put_user(tmp, (__force unsigned long __user *)data);
+ }
+ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+@@ -1222,7 +1249,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+ }
+ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
+-                     compat_long_t, addr, compat_long_t, data)
++                     compat_ulong_t, addr, compat_ulong_t, data)
+ {
+       struct task_struct *child;
+       long ret;
+@@ -1238,14 +1265,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
+               goto out;
+       }
++      if (gr_handle_ptrace(child, request)) {
++              ret = -EPERM;
++              goto out_put_task_struct;
++      }
++
+       if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+               ret = ptrace_attach(child, request, addr, data);
+               /*
+                * Some architectures need to do book-keeping after
+                * a ptrace attach.
+                */
+-              if (!ret)
++              if (!ret) {
+                       arch_ptrace_attach(child);
++                      gr_audit_ptrace(child);
++              }
+               goto out_put_task_struct;
+       }
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 971e2b1..dc5637d 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -132,12 +132,12 @@ static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
+ static DEFINE_SPINLOCK(rcu_torture_lock);
+ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
+ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
+-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+-static atomic_t n_rcu_torture_alloc;
+-static atomic_t n_rcu_torture_alloc_fail;
+-static atomic_t n_rcu_torture_free;
+-static atomic_t n_rcu_torture_mberror;
+-static atomic_t n_rcu_torture_error;
++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
++static atomic_unchecked_t n_rcu_torture_alloc;
++static atomic_unchecked_t n_rcu_torture_alloc_fail;
++static atomic_unchecked_t n_rcu_torture_free;
++static atomic_unchecked_t n_rcu_torture_mberror;
++static atomic_unchecked_t n_rcu_torture_error;
+ static long n_rcu_torture_barrier_error;
+ static long n_rcu_torture_boost_ktrerror;
+ static long n_rcu_torture_boost_rterror;
+@@ -146,7 +146,7 @@ static long n_rcu_torture_boosts;
+ static long n_rcu_torture_timers;
+ static long n_barrier_attempts;
+ static long n_barrier_successes;
+-static atomic_long_t n_cbfloods;
++static atomic_long_unchecked_t n_cbfloods;
+ static struct list_head rcu_torture_removed;
+ static int rcu_torture_writer_state;
+@@ -225,11 +225,11 @@ rcu_torture_alloc(void)
+       spin_lock_bh(&rcu_torture_lock);
+       if (list_empty(&rcu_torture_freelist)) {
+-              atomic_inc(&n_rcu_torture_alloc_fail);
++              atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
+               spin_unlock_bh(&rcu_torture_lock);
+               return NULL;
+       }
+-      atomic_inc(&n_rcu_torture_alloc);
++      atomic_inc_unchecked(&n_rcu_torture_alloc);
+       p = rcu_torture_freelist.next;
+       list_del_init(p);
+       spin_unlock_bh(&rcu_torture_lock);
+@@ -242,7 +242,7 @@ rcu_torture_alloc(void)
+ static void
+ rcu_torture_free(struct rcu_torture *p)
+ {
+-      atomic_inc(&n_rcu_torture_free);
++      atomic_inc_unchecked(&n_rcu_torture_free);
+       spin_lock_bh(&rcu_torture_lock);
+       list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+       spin_unlock_bh(&rcu_torture_lock);
+@@ -323,7 +323,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
+       i = rp->rtort_pipe_count;
+       if (i > RCU_TORTURE_PIPE_LEN)
+               i = RCU_TORTURE_PIPE_LEN;
+-      atomic_inc(&rcu_torture_wcount[i]);
++      atomic_inc_unchecked(&rcu_torture_wcount[i]);
+       if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+               rp->rtort_mbtest = 0;
+               return true;
+@@ -853,7 +853,7 @@ rcu_torture_cbflood(void *arg)
+       VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
+       do {
+               schedule_timeout_interruptible(cbflood_inter_holdoff);
+-              atomic_long_inc(&n_cbfloods);
++              atomic_long_inc_unchecked(&n_cbfloods);
+               WARN_ON(signal_pending(current));
+               for (i = 0; i < cbflood_n_burst; i++) {
+                       for (j = 0; j < cbflood_n_per_burst; j++) {
+@@ -983,7 +983,7 @@ rcu_torture_writer(void *arg)
+                       i = old_rp->rtort_pipe_count;
+                       if (i > RCU_TORTURE_PIPE_LEN)
+                               i = RCU_TORTURE_PIPE_LEN;
+-                      atomic_inc(&rcu_torture_wcount[i]);
++                      atomic_inc_unchecked(&rcu_torture_wcount[i]);
+                       old_rp->rtort_pipe_count++;
+                       switch (synctype[torture_random(&rand) % nsynctypes]) {
+                       case RTWS_DEF_FREE:
+@@ -1111,7 +1111,7 @@ static void rcu_torture_timer(unsigned long unused)
+               return;
+       }
+       if (p->rtort_mbtest == 0)
+-              atomic_inc(&n_rcu_torture_mberror);
++              atomic_inc_unchecked(&n_rcu_torture_mberror);
+       spin_lock(&rand_lock);
+       cur_ops->read_delay(&rand);
+       n_rcu_torture_timers++;
+@@ -1187,7 +1187,7 @@ rcu_torture_reader(void *arg)
+                       continue;
+               }
+               if (p->rtort_mbtest == 0)
+-                      atomic_inc(&n_rcu_torture_mberror);
++                      atomic_inc_unchecked(&n_rcu_torture_mberror);
+               cur_ops->read_delay(&rand);
+               preempt_disable();
+               pipe_count = p->rtort_pipe_count;
+@@ -1255,11 +1255,11 @@ rcu_torture_stats_print(void)
+               rcu_torture_current,
+               rcu_torture_current_version,
+               list_empty(&rcu_torture_freelist),
+-              atomic_read(&n_rcu_torture_alloc),
+-              atomic_read(&n_rcu_torture_alloc_fail),
+-              atomic_read(&n_rcu_torture_free));
++              atomic_read_unchecked(&n_rcu_torture_alloc),
++              atomic_read_unchecked(&n_rcu_torture_alloc_fail),
++              atomic_read_unchecked(&n_rcu_torture_free));
+       pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
+-              atomic_read(&n_rcu_torture_mberror),
++              atomic_read_unchecked(&n_rcu_torture_mberror),
+               n_rcu_torture_boost_ktrerror,
+               n_rcu_torture_boost_rterror);
+       pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
+@@ -1271,17 +1271,17 @@ rcu_torture_stats_print(void)
+               n_barrier_successes,
+               n_barrier_attempts,
+               n_rcu_torture_barrier_error);
+-      pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
++      pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
+       pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+-      if (atomic_read(&n_rcu_torture_mberror) != 0 ||
++      if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
+           n_rcu_torture_barrier_error != 0 ||
+           n_rcu_torture_boost_ktrerror != 0 ||
+           n_rcu_torture_boost_rterror != 0 ||
+           n_rcu_torture_boost_failure != 0 ||
+           i > 1) {
+               pr_cont("%s", "!!! ");
+-              atomic_inc(&n_rcu_torture_error);
++              atomic_inc_unchecked(&n_rcu_torture_error);
+               WARN_ON_ONCE(1);
+       }
+       pr_cont("Reader Pipe: ");
+@@ -1298,7 +1298,7 @@ rcu_torture_stats_print(void)
+       pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+       pr_cont("Free-Block Circulation: ");
+       for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+-              pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
++              pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
+       }
+       pr_cont("\n");
+@@ -1655,7 +1655,7 @@ rcu_torture_cleanup(void)
+       rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
+-      if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
++      if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
+               rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
+       else if (torture_onoff_failures())
+               rcu_torture_print_module_parms(cur_ops,
+@@ -1780,18 +1780,18 @@ rcu_torture_init(void)
+       rcu_torture_current = NULL;
+       rcu_torture_current_version = 0;
+-      atomic_set(&n_rcu_torture_alloc, 0);
+-      atomic_set(&n_rcu_torture_alloc_fail, 0);
+-      atomic_set(&n_rcu_torture_free, 0);
+-      atomic_set(&n_rcu_torture_mberror, 0);
+-      atomic_set(&n_rcu_torture_error, 0);
++      atomic_set_unchecked(&n_rcu_torture_alloc, 0);
++      atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
++      atomic_set_unchecked(&n_rcu_torture_free, 0);
++      atomic_set_unchecked(&n_rcu_torture_mberror, 0);
++      atomic_set_unchecked(&n_rcu_torture_error, 0);
+       n_rcu_torture_barrier_error = 0;
+       n_rcu_torture_boost_ktrerror = 0;
+       n_rcu_torture_boost_rterror = 0;
+       n_rcu_torture_boost_failure = 0;
+       n_rcu_torture_boosts = 0;
+       for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+-              atomic_set(&rcu_torture_wcount[i], 0);
++              atomic_set_unchecked(&rcu_torture_wcount[i], 0);
+       for_each_possible_cpu(cpu) {
+               for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+                       per_cpu(rcu_torture_count, cpu)[i] = 0;
+diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
+index 944b1b4..45d1d75 100644
+--- a/kernel/rcu/tiny.c
++++ b/kernel/rcu/tiny.c
+@@ -42,7 +42,7 @@
+ /* Forward declarations for tiny_plugin.h. */
+ struct rcu_ctrlblk;
+ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+-static void rcu_process_callbacks(struct softirq_action *unused);
++static void rcu_process_callbacks(void);
+ static void __call_rcu(struct rcu_head *head,
+                      rcu_callback_t func,
+                      struct rcu_ctrlblk *rcp);
+@@ -170,7 +170,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+                                     false));
+ }
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+       __rcu_process_callbacks(&rcu_sched_ctrlblk);
+       __rcu_process_callbacks(&rcu_bh_ctrlblk);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 5d80925..a71654a 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -328,7 +328,7 @@ static void rcu_momentary_dyntick_idle(void)
+                */
+               rdtp = this_cpu_ptr(&rcu_dynticks);
+               smp_mb__before_atomic(); /* Earlier stuff before QS. */
+-              atomic_add(2, &rdtp->dynticks);  /* QS. */
++              atomic_add_unchecked(2, &rdtp->dynticks);  /* QS. */
+               smp_mb__after_atomic(); /* Later stuff after QS. */
+               break;
+       }
+@@ -693,10 +693,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
+       rcu_prepare_for_idle();
+       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+       smp_mb__before_atomic();  /* See above. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       smp_mb__after_atomic();  /* Force ordering with next sojourn. */
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+-                   atomic_read(&rdtp->dynticks) & 0x1);
++                   atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+       rcu_dynticks_task_enter();
+       /*
+@@ -829,11 +829,11 @@ static void rcu_eqs_exit_common(long long oldval, int user)
+       rcu_dynticks_task_exit();
+       smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+       smp_mb__after_atomic();  /* See above. */
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+-                   !(atomic_read(&rdtp->dynticks) & 0x1));
++                   !(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+       rcu_cleanup_after_idle();
+       trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
+       if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+@@ -979,12 +979,12 @@ void rcu_nmi_enter(void)
+        * to be in the outermost NMI handler that interrupted an RCU-idle
+        * period (observation due to Andy Lutomirski).
+        */
+-      if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
++      if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
+               smp_mb__before_atomic();  /* Force delay from prior write. */
+-              atomic_inc(&rdtp->dynticks);
++              atomic_inc_unchecked(&rdtp->dynticks);
+               /* atomic_inc() before later RCU read-side crit sects */
+               smp_mb__after_atomic();  /* See above. */
+-              WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++              WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+               incby = 1;
+       }
+       rdtp->dynticks_nmi_nesting += incby;
+@@ -1009,7 +1009,7 @@ void rcu_nmi_exit(void)
+        * to us!)
+        */
+       WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+-      WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++      WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+       /*
+        * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+@@ -1024,9 +1024,9 @@ void rcu_nmi_exit(void)
+       rdtp->dynticks_nmi_nesting = 0;
+       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+       smp_mb__before_atomic();  /* See above. */
+-      atomic_inc(&rdtp->dynticks);
++      atomic_inc_unchecked(&rdtp->dynticks);
+       smp_mb__after_atomic();  /* Force delay to next write. */
+-      WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++      WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+ }
+ /**
+@@ -1039,7 +1039,7 @@ void rcu_nmi_exit(void)
+  */
+ bool notrace __rcu_is_watching(void)
+ {
+-      return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
++      return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+ }
+ /**
+@@ -1122,7 +1122,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
+ static int dyntick_save_progress_counter(struct rcu_data *rdp,
+                                        bool *isidle, unsigned long *maxj)
+ {
+-      rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
++      rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+       rcu_sysidle_check_cpu(rdp, isidle, maxj);
+       if ((rdp->dynticks_snap & 0x1) == 0) {
+               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+@@ -1147,7 +1147,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+       int *rcrmp;
+       unsigned int snap;
+-      curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
++      curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+       snap = (unsigned int)rdp->dynticks_snap;
+       /*
+@@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
+ /*
+  * Do RCU core processing for the current CPU.
+  */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+       struct rcu_state *rsp;
+@@ -3750,7 +3750,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
+       rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
+       rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+       WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+-      WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
++      WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
+       rdp->cpu = cpu;
+       rdp->rsp = rsp;
+       rcu_boot_init_nocb_percpu_data(rdp);
+@@ -3780,8 +3780,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+               init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
+       rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+       rcu_sysidle_init_percpu_data(rdp->dynticks);
+-      atomic_set(&rdp->dynticks->dynticks,
+-                 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
++      atomic_set_unchecked(&rdp->dynticks->dynticks,
++                 (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
+       raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
+       /*
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index f714f87..f49d80b 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -111,11 +111,11 @@ struct rcu_dynticks {
+       long long dynticks_nesting; /* Track irq/process nesting level. */
+                                   /* Process level is worth LLONG_MAX/2. */
+       int dynticks_nmi_nesting;   /* Track NMI nesting level. */
+-      atomic_t dynticks;          /* Even value for idle, else odd. */
++      atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
+ #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+       long long dynticks_idle_nesting;
+                                   /* irq/process nesting level from idle. */
+-      atomic_t dynticks_idle;     /* Even value for idle, else odd. */
++      atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
+                                   /*  "Idle" excludes userspace execution. */
+       unsigned long dynticks_idle_jiffies;
+                                   /* End of last non-NMI non-idle period. */
+@@ -400,9 +400,9 @@ struct rcu_data {
+ #ifdef CONFIG_RCU_FAST_NO_HZ
+       struct rcu_head oom_head;
+ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+-      atomic_long_t exp_workdone1;    /* # done by others #1. */
+-      atomic_long_t exp_workdone2;    /* # done by others #2. */
+-      atomic_long_t exp_workdone3;    /* # done by others #3. */
++      atomic_long_unchecked_t exp_workdone1;  /* # done by others #1. */
++      atomic_long_unchecked_t exp_workdone2;  /* # done by others #2. */
++      atomic_long_unchecked_t exp_workdone3;  /* # done by others #3. */
+       /* 7) Callback offloading. */
+ #ifdef CONFIG_RCU_NOCB_CPU
+@@ -519,8 +519,8 @@ struct rcu_state {
+       struct mutex exp_mutex;                 /* Serialize expedited GP. */
+       struct mutex exp_wake_mutex;            /* Serialize wakeup. */
+       unsigned long expedited_sequence;       /* Take a ticket. */
+-      atomic_long_t expedited_normal;         /* # fallbacks to normal. */
+-      atomic_t expedited_need_qs;             /* # CPUs left to check in. */
++      atomic_long_unchecked_t expedited_normal;/* # fallbacks to normal. */
++      atomic_unchecked_t expedited_need_qs;   /* # CPUs left to check in. */
+       struct swait_queue_head expedited_wq;   /* Wait for check-ins. */
+       int ncpus_snap;                         /* # CPUs seen last time. */
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 6d86ab6..7046dff 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -223,14 +223,14 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
+ }
+ /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
+-static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
++static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_unchecked_t *stat,
+                              unsigned long s)
+ {
+       if (rcu_exp_gp_seq_done(rsp, s)) {
+               trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
+               /* Ensure test happens before caller kfree(). */
+               smp_mb__before_atomic(); /* ^^^ */
+-              atomic_long_inc(stat);
++              atomic_long_inc_unchecked(stat);
+               return true;
+       }
+       return false;
+@@ -359,7 +359,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+                       if (raw_smp_processor_id() == cpu ||
+-                          !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
++                          !(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
+                               mask_ofl_test |= rdp->grpmask;
+               }
+               mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 0082fce..29572cb 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1174,7 +1174,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+       free_cpumask_var(cm);
+ }
+-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
++static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
+       .store                  = &rcu_cpu_kthread_task,
+       .thread_should_run      = rcu_cpu_kthread_should_run,
+       .thread_fn              = rcu_cpu_kthread,
+@@ -1643,7 +1643,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+              "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
+              "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+              ticks_value, ticks_title,
+-             atomic_read(&rdtp->dynticks) & 0xfff,
++             atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
+              rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
+              rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+              READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+@@ -2177,8 +2177,8 @@ static int rcu_nocb_kthread(void *arg)
+               }
+               trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+               smp_mb__before_atomic();  /* _add after CB invocation. */
+-              atomic_long_add(-c, &rdp->nocb_q_count);
+-              atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
++              atomic_long_sub(c, &rdp->nocb_q_count);
++              atomic_long_sub(cl, &rdp->nocb_q_count_lazy);
+               rdp->n_nocbs_invoked += c;
+       }
+       return 0;
+@@ -2533,9 +2533,9 @@ static void rcu_sysidle_enter(int irq)
+       j = jiffies;
+       WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
+       smp_mb__before_atomic();
+-      atomic_inc(&rdtp->dynticks_idle);
++      atomic_inc_unchecked(&rdtp->dynticks_idle);
+       smp_mb__after_atomic();
+-      WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
++      WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
+ }
+ /*
+@@ -2606,9 +2606,9 @@ static void rcu_sysidle_exit(int irq)
+       /* Record end of idle period. */
+       smp_mb__before_atomic();
+-      atomic_inc(&rdtp->dynticks_idle);
++      atomic_inc_unchecked(&rdtp->dynticks_idle);
+       smp_mb__after_atomic();
+-      WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
++      WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
+       /*
+        * If we are the timekeeping CPU, we are permitted to be non-idle
+@@ -2654,7 +2654,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+       WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+       /* Pick up current idle and NMI-nesting counter and check. */
+-      cur = atomic_read(&rdtp->dynticks_idle);
++      cur = atomic_read_unchecked(&rdtp->dynticks_idle);
+       if (cur & 0x1) {
+               *isidle = false; /* We are not idle! */
+               return;
+diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
+index 86782f9a..2e8c0a3 100644
+--- a/kernel/rcu/tree_trace.c
++++ b/kernel/rcu/tree_trace.c
+@@ -124,7 +124,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
+                  rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
+                  rdp->core_needs_qs);
+       seq_printf(m, " dt=%d/%llx/%d df=%lu",
+-                 atomic_read(&rdp->dynticks->dynticks),
++                 atomic_read_unchecked(&rdp->dynticks->dynticks),
+                  rdp->dynticks->dynticks_nesting,
+                  rdp->dynticks->dynticks_nmi_nesting,
+                  rdp->dynticks_fqs);
+@@ -189,14 +189,14 @@ static int show_rcuexp(struct seq_file *m, void *v)
+       for_each_possible_cpu(cpu) {
+               rdp = per_cpu_ptr(rsp->rda, cpu);
+-              s1 += atomic_long_read(&rdp->exp_workdone1);
+-              s2 += atomic_long_read(&rdp->exp_workdone2);
+-              s3 += atomic_long_read(&rdp->exp_workdone3);
++              s1 += atomic_long_read_unchecked(&rdp->exp_workdone1);
++              s2 += atomic_long_read_unchecked(&rdp->exp_workdone2);
++              s3 += atomic_long_read_unchecked(&rdp->exp_workdone3);
+       }
+       seq_printf(m, "s=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+                  rsp->expedited_sequence, s1, s2, s3,
+-                 atomic_long_read(&rsp->expedited_normal),
+-                 atomic_read(&rsp->expedited_need_qs),
++                 atomic_long_read_unchecked(&rsp->expedited_normal),
++                 atomic_read_unchecked(&rsp->expedited_need_qs),
+                  rsp->expedited_sequence / 2);
+       return 0;
+ }
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 9b5f044..b8b0a33 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -84,8 +84,8 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+ enum { MAX_IORES_LEVEL = 5 };
++static void *r_start(struct seq_file *m, loff_t *pos) __acquires(&resource_lock);
+ static void *r_start(struct seq_file *m, loff_t *pos)
+-      __acquires(resource_lock)
+ {
+       struct resource *p = m->private;
+       loff_t l = 0;
+@@ -95,8 +95,8 @@ static void *r_start(struct seq_file *m, loff_t *pos)
+       return p;
+ }
++static void r_stop(struct seq_file *m, void *v) __releases(&resource_lock);
+ static void r_stop(struct seq_file *m, void *v)
+-      __releases(resource_lock)
+ {
+       read_unlock(&resource_lock);
+ }
+@@ -171,8 +171,18 @@ static const struct file_operations proc_iomem_operations = {
+ static int __init ioresources_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++      proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
++      proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++      proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
++      proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
++#endif
++#else
+       proc_create("ioports", 0, NULL, &proc_ioports_operations);
+       proc_create("iomem", 0, NULL, &proc_iomem_operations);
++#endif
+       return 0;
+ }
+ __initcall(ioresources_init);
+diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
+index a5d966c..9c2d28b 100644
+--- a/kernel/sched/auto_group.c
++++ b/kernel/sched/auto_group.c
+@@ -9,7 +9,7 @@
+ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+ static struct autogroup autogroup_default;
+-static atomic_t autogroup_seq_nr;
++static atomic_unchecked_t autogroup_seq_nr;
+ void __init autogroup_init(struct task_struct *init_task)
+ {
+@@ -77,7 +77,7 @@ static inline struct autogroup *autogroup_create(void)
+       kref_init(&ag->kref);
+       init_rwsem(&ag->lock);
+-      ag->id = atomic_inc_return(&autogroup_seq_nr);
++      ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
+       ag->tg = tg;
+ #ifdef CONFIG_RT_GROUP_SCHED
+       /*
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 44817c6..caeebd2 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2259,7 +2259,7 @@ void set_numabalancing_state(bool enabled)
+ int sysctl_numa_balancing(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       int err;
+       int state = static_branch_likely(&sched_numa_balancing);
+@@ -2334,7 +2334,7 @@ static void __init init_schedstats(void)
+ int sysctl_schedstats(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       int err;
+       int state = static_branch_likely(&sched_schedstats);
+@@ -2784,7 +2784,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ /* rq->lock is NOT held, but preemption is disabled */
+ static void __balance_callback(struct rq *rq)
+ {
+-      struct callback_head *head, *next;
++      struct balance_callback *head, *next;
+       void (*func)(struct rq *rq);
+       unsigned long flags;
+@@ -2792,7 +2792,7 @@ static void __balance_callback(struct rq *rq)
+       head = rq->balance_callback;
+       rq->balance_callback = NULL;
+       while (head) {
+-              func = (void (*)(struct rq *))head->func;
++              func = head->func;
+               next = head->next;
+               head->next = NULL;
+               head = next;
+@@ -3759,6 +3759,8 @@ int can_nice(const struct task_struct *p, const int nice)
+       /* convert nice value [19,-20] to rlimit style value [1,40] */
+       int nice_rlim = nice_to_rlimit(nice);
++      gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
++
+       return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+               capable(CAP_SYS_NICE));
+ }
+@@ -3785,7 +3787,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+       nice = task_nice(current) + increment;
+       nice = clamp_val(nice, MIN_NICE, MAX_NICE);
+-      if (increment < 0 && !can_nice(current, nice))
++      if (increment < 0 && (!can_nice(current, nice) ||
++                            gr_handle_chroot_nice()))
+               return -EPERM;
+       retval = security_task_setnice(current, nice);
+@@ -4095,6 +4098,7 @@ recheck:
+                       if (policy != p->policy && !rlim_rtprio)
+                               return -EPERM;
++                      gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
+                       /* can't increase priority */
+                       if (attr->sched_priority > p->rt_priority &&
+                           attr->sched_priority > rlim_rtprio)
+@@ -7450,6 +7454,14 @@ void __init sched_init(void)
+       for_each_possible_cpu(i) {
+               struct rq *rq;
++#if defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) && defined(CONFIG_X86_64)
++              void *newstack = vzalloc_irq_stack();
++              if (newstack == NULL)
++                      panic("grsec: Unable to allocate irq stack");
++              populate_stack(newstack, IRQ_STACK_SIZE);
++              per_cpu(irq_stack_ptr, i) = newstack + IRQ_STACK_SIZE - 64;
++#endif
++
+               rq = cpu_rq(i);
+               raw_spin_lock_init(&rq->lock);
+               rq->nr_running = 0;
+@@ -7580,7 +7592,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
+        */
+       WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+                       "do not call blocking ops when !TASK_RUNNING; "
+-                      "state=%lx set at [<%p>] %pS\n",
++                      "state=%lx set at [<%p>] %pA\n",
+                       current->state,
+                       (void *)current->task_state_change,
+                       (void *)current->task_state_change);
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 1ce8867..0472a49 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -219,8 +219,8 @@ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+       return dl_task(prev);
+ }
+-static DEFINE_PER_CPU(struct callback_head, dl_push_head);
+-static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
++static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
++static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
+ static void push_dl_tasks(struct rq *);
+ static void pull_dl_task(struct rq *);
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 2a0a999..dc593c8 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -193,7 +193,7 @@ late_initcall(sched_init_debug);
+ #ifdef CONFIG_SYSCTL
+-static struct ctl_table sd_ctl_dir[] = {
++static ctl_table_no_const sd_ctl_dir[] __read_only = {
+       {
+               .procname       = "sched_domain",
+               .mode           = 0555,
+@@ -210,17 +210,17 @@ static struct ctl_table sd_ctl_root[] = {
+       {}
+ };
+-static struct ctl_table *sd_alloc_ctl_entry(int n)
++static ctl_table_no_const *sd_alloc_ctl_entry(int n)
+ {
+-      struct ctl_table *entry =
++      ctl_table_no_const *entry =
+               kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+       return entry;
+ }
+-static void sd_free_ctl_entry(struct ctl_table **tablep)
++static void sd_free_ctl_entry(ctl_table_no_const *tablep)
+ {
+-      struct ctl_table *entry;
++      ctl_table_no_const *entry;
+       /*
+        * In the intermediate directories, both the child directory and
+@@ -228,22 +228,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+        * will always be set. In the lowest directory the names are
+        * static strings and all have proc handlers.
+        */
+-      for (entry = *tablep; entry->mode; entry++) {
+-              if (entry->child)
+-                      sd_free_ctl_entry(&entry->child);
++      for (entry = tablep; entry->mode; entry++) {
++              if (entry->child) {
++                      sd_free_ctl_entry(entry->child);
++                      pax_open_kernel();
++                      entry->child = NULL;
++                      pax_close_kernel();
++              }
+               if (entry->proc_handler == NULL)
+                       kfree(entry->procname);
+       }
+-      kfree(*tablep);
+-      *tablep = NULL;
++      kfree(tablep);
+ }
+ static int min_load_idx = 0;
+ static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+ static void
+-set_table_entry(struct ctl_table *entry,
++set_table_entry(ctl_table_no_const *entry,
+               const char *procname, void *data, int maxlen,
+               umode_t mode, proc_handler *proc_handler,
+               bool load_idx)
+@@ -260,10 +263,10 @@ set_table_entry(struct ctl_table *entry,
+       }
+ }
+-static struct ctl_table *
++static ctl_table_no_const *
+ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ {
+-      struct ctl_table *table = sd_alloc_ctl_entry(14);
++      ctl_table_no_const *table = sd_alloc_ctl_entry(14);
+       if (table == NULL)
+               return NULL;
+@@ -301,9 +304,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+       return table;
+ }
+-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
+ {
+-      struct ctl_table *entry, *table;
++      ctl_table_no_const *entry, *table;
+       struct sched_domain *sd;
+       int domain_num = 0, i;
+       char buf[32];
+@@ -330,11 +333,13 @@ static struct ctl_table_header *sd_sysctl_header;
+ void register_sched_domain_sysctl(void)
+ {
+       int i, cpu_num = num_possible_cpus();
+-      struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
++      ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
+       char buf[32];
+       WARN_ON(sd_ctl_dir[0].child);
++      pax_open_kernel();
+       sd_ctl_dir[0].child = entry;
++      pax_close_kernel();
+       if (entry == NULL)
+               return;
+@@ -356,8 +361,12 @@ void unregister_sched_domain_sysctl(void)
+ {
+       unregister_sysctl_table(sd_sysctl_header);
+       sd_sysctl_header = NULL;
+-      if (sd_ctl_dir[0].child)
+-              sd_free_ctl_entry(&sd_ctl_dir[0].child);
++      if (sd_ctl_dir[0].child) {
++              sd_free_ctl_entry(sd_ctl_dir[0].child);
++              pax_open_kernel();
++              sd_ctl_dir[0].child = NULL;
++              pax_close_kernel();
++      }
+ }
+ #endif /* CONFIG_SYSCTL */
+ #endif /* CONFIG_SMP */
+@@ -801,7 +810,11 @@ static int __init init_sched_debug_procfs(void)
+ {
+       struct proc_dir_entry *pe;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      pe = proc_create("sched_debug", 0400, NULL, &sched_debug_fops);
++#else
+       pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
++#endif
+       if (!pe)
+               return -ENOMEM;
+       return 0;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 8b3610c..94bbee3 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -8305,7 +8305,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
+  * run_rebalance_domains is triggered when needed from the scheduler tick.
+  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+  */
+-static void run_rebalance_domains(struct softirq_action *h)
++static __latent_entropy void run_rebalance_domains(void)
+ {
+       struct rq *this_rq = this_rq();
+       enum cpu_idle_type idle = this_rq->idle_balance ?
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index d5690b7..40d1c85 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -362,8 +362,8 @@ static inline int has_pushable_tasks(struct rq *rq)
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+ }
+-static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+-static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
++static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
++static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
+ static void push_rt_tasks(struct rq *);
+ static void pull_rt_task(struct rq *);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index c64fc51..d12559e 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -642,7 +642,10 @@ struct rq {
+       unsigned long cpu_capacity;
+       unsigned long cpu_capacity_orig;
+-      struct callback_head *balance_callback;
++      struct balance_callback {
++              struct balance_callback *next;
++              void (*func)(struct rq *rq);
++      } *balance_callback;
+       unsigned char idle_balance;
+       /* For active balancing */
+@@ -788,7 +791,7 @@ extern int migrate_swap(struct task_struct *, struct task_struct *);
+ static inline void
+ queue_balance_callback(struct rq *rq,
+-                     struct callback_head *head,
++                     struct balance_callback *head,
+                      void (*func)(struct rq *rq))
+ {
+       lockdep_assert_held(&rq->lock);
+@@ -796,7 +799,7 @@ queue_balance_callback(struct rq *rq,
+       if (unlikely(head->next))
+               return;
+-      head->func = (void (*)(struct callback_head *))func;
++      head->func = func;
+       head->next = rq->balance_callback;
+       rq->balance_callback = head;
+ }
+@@ -1253,7 +1256,7 @@ struct sched_class {
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+       void (*task_change_group) (struct task_struct *p, int type);
+ #endif
+-};
++} __do_const;
+ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
+ {
+@@ -1323,7 +1326,7 @@ extern struct dl_bandwidth def_dl_bandwidth;
+ extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
+ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+-unsigned long to_ratio(u64 period, u64 runtime);
++unsigned long __attribute_const__ to_ratio(u64 period, u64 runtime);
+ extern void init_entity_runnable_average(struct sched_entity *se);
+ extern void post_init_entity_util_avg(struct sched_entity *se);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index af21afc..bc14d32 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
+ int print_fatal_signals __read_mostly;
+-static void __user *sig_handler(struct task_struct *t, int sig)
++static __sighandler_t sig_handler(struct task_struct *t, int sig)
+ {
+       return t->sighand->action[sig - 1].sa.sa_handler;
+ }
+-static int sig_handler_ignored(void __user *handler, int sig)
++static int sig_handler_ignored(__sighandler_t handler, int sig)
+ {
+       /* Is it explicitly or implicitly ignored? */
+       return handler == SIG_IGN ||
+@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
+ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+ {
+-      void __user *handler;
++      __sighandler_t handler;
+       handler = sig_handler(t, sig);
+@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+       atomic_inc(&user->sigpending);
+       rcu_read_unlock();
++      if (!override_rlimit)
++              gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
++
+       if (override_rlimit ||
+           atomic_read(&user->sigpending) <=
+                       task_rlimit(t, RLIMIT_SIGPENDING)) {
+@@ -494,7 +497,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ int unhandled_signal(struct task_struct *tsk, int sig)
+ {
+-      void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
++      __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
+       if (is_global_init(tsk))
+               return 1;
+       if (handler != SIG_IGN && handler != SIG_DFL)
+@@ -556,6 +559,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
+  *
+  * All callers have to hold the siglock.
+  */
++int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) __must_hold(&tsk->sighand->siglock);
+ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ {
+       int signr;
+@@ -742,6 +746,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
+               }
+       }
++      /* allow glibc communication via tgkill to other threads in our
++         thread group */
++      if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
++           sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
++          && gr_handle_signal(t, sig))
++              return -EPERM;
++
+       return security_task_kill(t, info, sig, 0);
+ }
+@@ -1125,7 +1136,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+       return send_signal(sig, info, p, 1);
+ }
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+       return send_signal(sig, info, t, 0);
+@@ -1162,6 +1173,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+       unsigned long int flags;
+       int ret, blocked, ignored;
+       struct k_sigaction *action;
++      int is_unhandled = 0;
+       spin_lock_irqsave(&t->sighand->siglock, flags);
+       action = &t->sighand->action[sig-1];
+@@ -1176,9 +1188,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+       }
+       if (action->sa.sa_handler == SIG_DFL)
+               t->signal->flags &= ~SIGNAL_UNKILLABLE;
++      if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
++              is_unhandled = 1;
+       ret = specific_send_sig_info(sig, info, t);
+       spin_unlock_irqrestore(&t->sighand->siglock, flags);
++      /* only deal with unhandled signals, java etc trigger SIGSEGV during
++         normal operation */
++      if (is_unhandled) {
++              gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
++              gr_handle_crash(t, sig);
++      }
++
+       return ret;
+ }
+@@ -1259,8 +1280,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+       ret = check_kill_permission(sig, info, p);
+       rcu_read_unlock();
+-      if (!ret && sig)
++      if (!ret && sig) {
+               ret = do_send_sig_info(sig, info, p, true);
++              if (!ret)
++                      gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
++      }
+       return ret;
+ }
+@@ -1774,9 +1798,8 @@ static int sigkill_pending(struct task_struct *tsk)
+  * If we actually decide not to stop at all because the tracer
+  * is gone, we keep current->exit_code unless clear_code.
+  */
++static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) __must_hold(&current->sighand->siglock);
+ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+-      __releases(&current->sighand->siglock)
+-      __acquires(&current->sighand->siglock)
+ {
+       bool gstop_done = false;
+@@ -1896,6 +1919,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+       recalc_sigpending_tsk(current);
+ }
++static void ptrace_do_notify(int signr, int exit_code, int why) __must_hold(&current->sighand->siglock);
+ static void ptrace_do_notify(int signr, int exit_code, int why)
+ {
+       siginfo_t info;
+@@ -1943,8 +1967,8 @@ void ptrace_notify(int exit_code)
+  * %false if group stop is already cancelled or ptrace trap is scheduled.
+  * %true if participated in group stop.
+  */
++static bool do_signal_stop(int signr) __releases(&current->sighand->siglock);
+ static bool do_signal_stop(int signr)
+-      __releases(&current->sighand->siglock)
+ {
+       struct signal_struct *sig = current->signal;
+@@ -1956,8 +1980,10 @@ static bool do_signal_stop(int signr)
+               WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
+               if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
+-                  unlikely(signal_group_exit(sig)))
++                  unlikely(signal_group_exit(sig))) {
++                      __release(&current->sighand->siglock); // XXX sparse can't model conditional release
+                       return false;
++              }
+               /*
+                * There is no group stop already in progress.  We must
+                * initiate one now.
+@@ -2041,6 +2067,7 @@ static bool do_signal_stop(int signr)
+                * Schedule it and let the caller deal with it.
+                */
+               task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
++              __release(&current->sighand->siglock); // XXX sparse can't model conditional release
+               return false;
+       }
+ }
+@@ -2864,7 +2891,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+       int error = -ESRCH;
+       rcu_read_lock();
+-      p = find_task_by_vpid(pid);
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++      /* allow glibc communication via tgkill to other threads in our
++         thread group */
++      if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
++          sig == (SIGRTMIN+1) && tgid == info->si_pid)            
++              p = find_task_by_vpid_unrestricted(pid);
++      else
++#endif
++              p = find_task_by_vpid(pid);
+       if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
+               error = check_kill_permission(sig, info, p);
+               /*
+@@ -3196,8 +3231,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
+       }
+       seg = get_fs();
+       set_fs(KERNEL_DS);
+-      ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+-                           (stack_t __force __user *) &uoss,
++      ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
++                           (stack_t __force_user *) &uoss,
+                            compat_user_stack_pointer());
+       set_fs(seg);
+       if (ret >= 0 && uoss_ptr)  {
+@@ -3481,7 +3516,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
+ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
+ {
+       struct k_sigaction new_sa, old_sa;
+-      int ret;
++      long ret;
+       new_sa.sa.sa_handler = handler;
+       new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+@@ -3489,7 +3524,7 @@ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
+       ret = do_sigaction(sig, &new_sa, &old_sa);
+-      return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
++      return ret ? ret : (long)old_sa.sa.sa_handler;
+ }
+ #endif /* __ARCH_WANT_SYS_SIGNAL */
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 3aa642d..3200019 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -573,7 +573,7 @@ void __init smp_init(void)
+  * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
+  * of local_irq_disable/enable().
+  */
+-int on_each_cpu(void (*func) (void *info), void *info, int wait)
++int on_each_cpu(smp_call_func_t func, void *info, int wait)
+ {
+       unsigned long flags;
+       int ret = 0;
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index 13bc43d..e7068a2 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -13,6 +13,7 @@
+ #include <linux/percpu.h>
+ #include <linux/kthread.h>
+ #include <linux/smpboot.h>
++#include <asm/pgtable.h>
+ #include "smpboot.h"
+@@ -303,7 +304,7 @@ int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_threa
+               if (cpumask_test_cpu(cpu, cpumask))
+                       smpboot_unpark_thread(plug_thread, cpu);
+       }
+-      list_add(&plug_thread->list, &hotplug_threads);
++      pax_list_add(&plug_thread->list, &hotplug_threads);
+ out:
+       mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
+@@ -321,7 +322,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
+ {
+       get_online_cpus();
+       mutex_lock(&smpboot_threads_lock);
+-      list_del(&plug_thread->list);
++      pax_list_del(&plug_thread->list);
+       smpboot_destroy_threads(plug_thread);
+       mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
+@@ -361,7 +362,9 @@ int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+               smpboot_unpark_thread(plug_thread, cpu);
++      pax_open_kernel();
+       cpumask_copy(old, new);
++      pax_close_kernel();
+       mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 17caf4b..2e68ae7 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
+ EXPORT_SYMBOL(irq_stat);
+ #endif
+-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
++static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+@@ -270,7 +270,7 @@ restart:
+               kstat_incr_softirqs_this_cpu(vec_nr);
+               trace_softirq_entry(vec_nr);
+-              h->action(h);
++              h->action();
+               trace_softirq_exit(vec_nr);
+               if (unlikely(prev_count != preempt_count())) {
+                       pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
+       or_softirq_pending(1UL << nr);
+ }
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void __init open_softirq(int nr, void (*action)(void))
+ {
+       softirq_vec[nr].action = action;
+ }
+@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+-static void tasklet_action(struct softirq_action *a)
++static __latent_entropy void tasklet_action(void)
+ {
+       struct tasklet_struct *list;
+@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
+       }
+ }
+-static void tasklet_hi_action(struct softirq_action *a)
++static __latent_entropy void tasklet_hi_action(void)
+ {
+       struct tasklet_struct *list;
+@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
+       .notifier_call = cpu_callback
+ };
+-static struct smp_hotplug_thread softirq_threads = {
++static struct smp_hotplug_thread softirq_threads __read_only = {
+       .store                  = &ksoftirqd,
+       .thread_should_run      = ksoftirqd_should_run,
+       .thread_fn              = run_ksoftirqd,
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 4a1ca5f..98ccb56 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -509,7 +509,7 @@ void stop_machine_unpark(int cpu)
+       kthread_unpark(stopper->thread);
+ }
+-static struct smp_hotplug_thread cpu_stop_threads = {
++static struct smp_hotplug_thread cpu_stop_threads __read_only = {
+       .store                  = &cpu_stopper.thread,
+       .thread_should_run      = cpu_stop_should_run,
+       .thread_fn              = cpu_stopper_thread,
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 89d5be4..441bef3 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
+               error = -EACCES;
+               goto out;
+       }
++
++      if (gr_handle_chroot_setpriority(p, niceval)) {
++              error = -EACCES;
++              goto out;
++      }
++
+       no_nice = security_task_setnice(p, niceval);
+       if (no_nice) {
+               error = no_nice;
+@@ -366,6 +372,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+                       goto error;
+       }
++      if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
++              goto error;
++
++      if (!gid_eq(new->gid, old->gid)) {
++              /* make sure we generate a learn log for what will
++                 end up being a role transition after a full-learning
++                 policy is generated
++                 CAP_SETGID is required to perform a transition
++                 we may not log a CAP_SETGID check above, e.g.
++                 in the case where new rgid = old egid
++              */
++              gr_learn_cap(current, new, CAP_SETGID, true);
++      }
++
+       if (rgid != (gid_t) -1 ||
+           (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
+               new->sgid = new->egid;
+@@ -401,6 +421,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+       old = current_cred();
+       retval = -EPERM;
++
++      if (gr_check_group_change(kgid, kgid, kgid))
++              goto error;
++
+       if (ns_capable(old->user_ns, CAP_SETGID))
+               new->gid = new->egid = new->sgid = new->fsgid = kgid;
+       else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
+@@ -418,7 +442,7 @@ error:
+ /*
+  * change the user struct in a credentials set to match the new UID
+  */
+-static int set_user(struct cred *new)
++int set_user(struct cred *new)
+ {
+       struct user_struct *new_user;
+@@ -498,7 +522,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+                       goto error;
+       }
++      if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
++              goto error;
++
+       if (!uid_eq(new->uid, old->uid)) {
++              /* make sure we generate a learn log for what will
++                 end up being a role transition after a full-learning
++                 policy is generated
++                 CAP_SETUID is required to perform a transition
++                 we may not log a CAP_SETUID check above, e.g.
++                 in the case where new ruid = old euid
++              */
++              gr_learn_cap(current, new, CAP_SETUID, true);
+               retval = set_user(new);
+               if (retval < 0)
+                       goto error;
+@@ -548,6 +583,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+       old = current_cred();
+       retval = -EPERM;
++
++      if (gr_check_crash_uid(kuid))
++              goto error;
++      if (gr_check_user_change(kuid, kuid, kuid))
++              goto error;
++
+       if (ns_capable(old->user_ns, CAP_SETUID)) {
+               new->suid = new->uid = kuid;
+               if (!uid_eq(kuid, old->uid)) {
+@@ -617,6 +658,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+                       goto error;
+       }
++      if (gr_check_user_change(kruid, keuid, INVALID_UID))
++              goto error;
++
+       if (ruid != (uid_t) -1) {
+               new->uid = kruid;
+               if (!uid_eq(kruid, old->uid)) {
+@@ -701,6 +745,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+                       goto error;
+       }
++      if (gr_check_group_change(krgid, kegid, INVALID_GID))
++              goto error;
++
+       if (rgid != (gid_t) -1)
+               new->gid = krgid;
+       if (egid != (gid_t) -1)
+@@ -765,12 +812,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+           uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
+           ns_capable(old->user_ns, CAP_SETUID)) {
+               if (!uid_eq(kuid, old->fsuid)) {
++                      if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
++                              goto error;
++
+                       new->fsuid = kuid;
+                       if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+                               goto change_okay;
+               }
+       }
++error:
+       abort_creds(new);
+       return old_fsuid;
+@@ -803,12 +854,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+       if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
+           gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
+           ns_capable(old->user_ns, CAP_SETGID)) {
++              if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
++                      goto error;
++
+               if (!gid_eq(kgid, old->fsgid)) {
+                       new->fsgid = kgid;
+                       goto change_okay;
+               }
+       }
++error:
+       abort_creds(new);
+       return old_fsgid;
+@@ -1187,19 +1242,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+               return -EFAULT;
+       down_read(&uts_sem);
+-      error = __copy_to_user(&name->sysname, &utsname()->sysname,
++      error = __copy_to_user(name->sysname, &utsname()->sysname,
+                              __OLD_UTS_LEN);
+       error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->nodename, &utsname()->nodename,
++      error |= __copy_to_user(name->nodename, &utsname()->nodename,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->release, &utsname()->release,
++      error |= __copy_to_user(name->release, &utsname()->release,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->release + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->version, &utsname()->version,
++      error |= __copy_to_user(name->version, &utsname()->version,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->version + __OLD_UTS_LEN);
+-      error |= __copy_to_user(&name->machine, &utsname()->machine,
++      error |= __copy_to_user(name->machine, &utsname()->machine,
+                               __OLD_UTS_LEN);
+       error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+       up_read(&uts_sem);
+@@ -1400,6 +1455,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
+                        */
+                       new_rlim->rlim_cur = 1;
+               }
++              /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
++                 is changed to a lower value.  Since tasks can be created by the same
++                 user in between this limit change and an execve by this task, force
++                 a recheck only for this task by setting PF_NPROC_EXCEEDED
++              */
++              if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
++                      tsk->flags |= PF_NPROC_EXCEEDED;
+       }
+       if (!retval) {
+               if (old_rlim)
+diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
+index 2c5e3a8..301fb1a 100644
+--- a/kernel/sys_ni.c
++++ b/kernel/sys_ni.c
+@@ -6,12 +6,12 @@
+ /*  we can't #include <linux/syscalls.h> here,
+     but tell gcc to not warn with -Wmissing-prototypes  */
+-asmlinkage long sys_ni_syscall(void);
++asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+ /*
+  * Non-implemented system calls get redirected here.
+  */
+-asmlinkage long sys_ni_syscall(void)
++asmlinkage long sys_ni_syscall(unsigned long a, unsigned long b, unsigned long c, unsigned long d, unsigned long e, unsigned long f)
+ {
+       return -ENOSYS;
+ }
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index a13bbda..745603f 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -95,7 +95,6 @@
+ #endif
+ #if defined(CONFIG_SYSCTL)
+-
+ /* External variables not in a header file. */
+ extern int suid_dumpable;
+ #ifdef CONFIG_COREDUMP
+@@ -112,23 +111,25 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
+ #ifndef CONFIG_MMU
+ extern int sysctl_nr_trim_pages;
+ #endif
++extern int sysctl_modify_ldt;
+ /* Constants used for minimum and  maximum */
+ #ifdef CONFIG_LOCKUP_DETECTOR
+-static int sixty = 60;
++static int sixty __read_only = 60;
+ #endif
+-static int __maybe_unused neg_one = -1;
++static int __maybe_unused neg_one __read_only = -1;
+-static int zero;
+-static int __maybe_unused one = 1;
+-static int __maybe_unused two = 2;
+-static int __maybe_unused four = 4;
+-static unsigned long one_ul = 1;
+-static int one_hundred = 100;
+-static int one_thousand = 1000;
++static int zero __read_only = 0;
++static int __maybe_unused one __read_only = 1;
++static int __maybe_unused two __read_only = 2;
++static int __maybe_unused three __read_only = 3;
++static int __maybe_unused four __read_only = 4;
++static unsigned long one_ul __read_only = 1;
++static int one_hundred __read_only = 100;
++static int one_thousand __read_only = 1000;
+ #ifdef CONFIG_PRINTK
+-static int ten_thousand = 10000;
++static int ten_thousand __read_only = 10000;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+ static int six_hundred_forty_kb = 640 * 1024;
+@@ -185,10 +186,8 @@ static int proc_taint(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos);
+ #endif
+-#ifdef CONFIG_PRINTK
+-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos);
+-#endif
+ static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp, loff_t *ppos);
+@@ -219,6 +218,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
+ #endif
++extern struct ctl_table grsecurity_table[];
++
+ static struct ctl_table kern_table[];
+ static struct ctl_table vm_table[];
+ static struct ctl_table fs_table[];
+@@ -233,6 +234,20 @@ extern struct ctl_table epoll_table[];
+ int sysctl_legacy_va_layout;
+ #endif
++#ifdef CONFIG_PAX_SOFTMODE
++static struct ctl_table pax_table[] = {
++      {
++              .procname       = "softmode",
++              .data           = &pax_softmode,
++              .maxlen         = sizeof(unsigned int),
++              .mode           = 0600,
++              .proc_handler   = &proc_dointvec,
++      },
++
++      { }
++};
++#endif
++
+ /* The default sysctl tables: */
+ static struct ctl_table sysctl_base_table[] = {
+@@ -281,6 +296,22 @@ static int max_extfrag_threshold = 1000;
+ #endif
+ static struct ctl_table kern_table[] = {
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++      {
++              .procname       = "grsecurity",
++              .mode           = 0500,
++              .child          = grsecurity_table,
++      },
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++      {
++              .procname       = "pax",
++              .mode           = 0500,
++              .child          = pax_table,
++      },
++#endif
++
+       {
+               .procname       = "sched_child_runs_first",
+               .data           = &sysctl_sched_child_runs_first,
+@@ -644,7 +675,7 @@ static struct ctl_table kern_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               /* only handle a transition from default "0" to "1" */
+-              .proc_handler   = proc_dointvec_minmax,
++              .proc_handler   = proc_dointvec_minmax_secure,
+               .extra1         = &one,
+               .extra2         = &one,
+       },
+@@ -655,7 +686,7 @@ static struct ctl_table kern_table[] = {
+               .data           = &modprobe_path,
+               .maxlen         = KMOD_PATH_LEN,
+               .mode           = 0644,
+-              .proc_handler   = proc_dostring,
++              .proc_handler   = proc_dostring_modpriv,
+       },
+       {
+               .procname       = "modules_disabled",
+@@ -663,7 +694,7 @@ static struct ctl_table kern_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               /* only handle a transition from default "0" to "1" */
+-              .proc_handler   = proc_dointvec_minmax,
++              .proc_handler   = proc_dointvec_minmax_secure,
+               .extra1         = &one,
+               .extra2         = &one,
+       },
+@@ -825,20 +856,24 @@ static struct ctl_table kern_table[] = {
+               .data           = &dmesg_restrict,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec_minmax_sysadmin,
++              .proc_handler   = proc_dointvec_minmax_secure_sysadmin,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
++#endif
+       {
+               .procname       = "kptr_restrict",
+               .data           = &kptr_restrict,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec_minmax_sysadmin,
++              .proc_handler   = proc_dointvec_minmax_secure_sysadmin,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              .extra1         = &one,
++#else
+               .extra1         = &zero,
++#endif
+               .extra2         = &two,
+       },
+-#endif
+       {
+               .procname       = "ngroups_max",
+               .data           = &ngroups_max,
+@@ -1003,6 +1038,17 @@ static struct ctl_table kern_table[] = {
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
++#ifdef CONFIG_MODIFY_LDT_SYSCALL
++      {
++              .procname       = "modify_ldt",
++              .data           = &sysctl_modify_ldt,
++              .maxlen         = sizeof(int),
++              .mode           = 0644,
++              .proc_handler   = proc_dointvec_minmax_secure_sysadmin,
++              .extra1         = &zero,
++              .extra2         = &one,
++      },
++#endif
+ #endif
+ #if defined(CONFIG_MMU)
+       {
+@@ -1125,10 +1171,17 @@ static struct ctl_table kern_table[] = {
+        */
+       {
+               .procname       = "perf_event_paranoid",
+-              .data           = &sysctl_perf_event_paranoid,
+-              .maxlen         = sizeof(sysctl_perf_event_paranoid),
++              .data           = &sysctl_perf_event_legitimately_concerned,
++              .maxlen         = sizeof(sysctl_perf_event_legitimately_concerned),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec,
++              /* go ahead, be a hero */
++              .proc_handler   = proc_dointvec_minmax_secure_sysadmin,
++              .extra1         = &neg_one,
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++              .extra2         = &three,
++#else
++              .extra2         = &two,
++#endif
+       },
+       {
+               .procname       = "perf_event_mlock_kb",
+@@ -1469,6 +1522,13 @@ static struct ctl_table vm_table[] = {
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+       },
++      {
++              .procname       = "heap_stack_gap",
++              .data           = &sysctl_heap_stack_gap,
++              .maxlen         = sizeof(sysctl_heap_stack_gap),
++              .mode           = 0644,
++              .proc_handler   = proc_doulongvec_minmax,
++      },
+ #else
+       {
+               .procname       = "nr_trim_pages",
+@@ -1988,6 +2048,16 @@ int proc_dostring(struct ctl_table *table, int write,
+                              (char __user *)buffer, lenp, ppos);
+ }
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++                void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++      if (write && !capable(CAP_SYS_MODULE))
++              return -EPERM;
++
++      return _proc_do_string(table->data, table->maxlen, write,
++                             buffer, lenp, ppos);
++}
++
+ static size_t proc_skip_spaces(char **buf)
+ {
+       size_t ret;
+@@ -2093,6 +2163,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
+       len = strlen(tmp);
+       if (len > *size)
+               len = *size;
++      if (len > sizeof(tmp))
++              len = sizeof(tmp);
+       if (copy_to_user(*buf, tmp, len))
+               return -EFAULT;
+       *size -= len;
+@@ -2297,6 +2369,44 @@ int proc_douintvec(struct ctl_table *table, int write,
+                               do_proc_douintvec_conv, NULL);
+ }
++static int do_proc_dointvec_conv_secure(bool *negp, unsigned long *lvalp,
++                               int *valp,
++                               int write, void *data)
++{
++      if (write) {
++              if (*negp) {
++                      if (*lvalp > (unsigned long) INT_MAX + 1)
++                              return -EINVAL;
++                      pax_open_kernel();
++                      *valp = -*lvalp;
++                      pax_close_kernel();
++              } else {
++                      if (*lvalp > (unsigned long) INT_MAX)
++                              return -EINVAL;
++                      pax_open_kernel();
++                      *valp = *lvalp;
++                      pax_close_kernel();
++              }
++      } else {
++              int val = *valp;
++              if (val < 0) {
++                      *negp = true;
++                      *lvalp = -(unsigned long)val;
++              } else {
++                      *negp = false;
++                      *lvalp = (unsigned long)val;
++              }
++      }
++      return 0;
++}
++
++int proc_dointvec_secure(struct ctl_table *table, int write,
++                   void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++    return do_proc_dointvec(table,write,buffer,lenp,ppos,
++                          do_proc_dointvec_conv_secure,NULL);
++}
++
+ /*
+  * Taint values can only be increased
+  * This means we can safely use a temporary.
+@@ -2304,7 +2414,7 @@ int proc_douintvec(struct ctl_table *table, int write,
+ static int proc_taint(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table t;
++      ctl_table_no_const t;
+       unsigned long tmptaint = get_taint();
+       int err;
+@@ -2332,16 +2442,14 @@ static int proc_taint(struct ctl_table *table, int write,
+       return err;
+ }
+-#ifdef CONFIG_PRINTK
+-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+       if (write && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+-      return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++      return proc_dointvec_minmax_secure(table, write, buffer, lenp, ppos);
+ }
+-#endif
+ struct do_proc_dointvec_minmax_conv_param {
+       int *min;
+@@ -2372,6 +2480,32 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
+       return 0;
+ }
++static int do_proc_dointvec_minmax_conv_secure(bool *negp, unsigned long *lvalp,
++                                      int *valp,
++                                      int write, void *data)
++{
++      struct do_proc_dointvec_minmax_conv_param *param = data;
++      if (write) {
++              int val = *negp ? -*lvalp : *lvalp;
++              if ((param->min && *param->min > val) ||
++                  (param->max && *param->max < val))
++                      return -EINVAL;
++              pax_open_kernel();
++              *valp = val;
++              pax_close_kernel();
++      } else {
++              int val = *valp;
++              if (val < 0) {
++                      *negp = true;
++                      *lvalp = -(unsigned long)val;
++              } else {
++                      *negp = false;
++                      *lvalp = (unsigned long)val;
++              }
++      }
++      return 0;
++}
++
+ /**
+  * proc_dointvec_minmax - read a vector of integers with min/max values
+  * @table: the sysctl table
+@@ -2399,6 +2533,17 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
+                               do_proc_dointvec_minmax_conv, &param);
+ }
++int proc_dointvec_minmax_secure(struct ctl_table *table, int write,
++                void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++      struct do_proc_dointvec_minmax_conv_param param = {
++              .min = (int *) table->extra1,
++              .max = (int *) table->extra2,
++      };
++      return do_proc_dointvec(table, write, buffer, lenp, ppos,
++                              do_proc_dointvec_minmax_conv_secure, &param);
++}
++
+ static void validate_coredump_safety(void)
+ {
+ #ifdef CONFIG_COREDUMP
+@@ -2886,6 +3031,12 @@ int proc_dostring(struct ctl_table *table, int write,
+       return -ENOSYS;
+ }
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++                void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++      return -ENOSYS;
++}
++
+ int proc_dointvec(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -2949,5 +3100,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+ EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
+ EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
+ EXPORT_SYMBOL(proc_dostring);
++EXPORT_SYMBOL(proc_dostring_modpriv);
+ EXPORT_SYMBOL(proc_doulongvec_minmax);
+ EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
+diff --git a/kernel/taskstats.c b/kernel/taskstats.c
+index b3f05ee..b1b5044 100644
+--- a/kernel/taskstats.c
++++ b/kernel/taskstats.c
+@@ -28,9 +28,12 @@
+ #include <linux/fs.h>
+ #include <linux/file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/grsecurity.h>
+ #include <net/genetlink.h>
+ #include <linux/atomic.h>
++extern int gr_is_taskstats_denied(int pid);
++
+ /*
+  * Maximum length of a cpumask that can be specified in
+  * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
+@@ -540,6 +543,9 @@ err:
+ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+ {
++      if (gr_is_taskstats_denied(current->pid))
++              return -EACCES;
++
+       if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+               return cmd_attr_register_cpumask(info);
+       else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index c3aad68..c4b87f3 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -522,7 +522,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
+  *
+  * Provides the underlying alarm base time.
+  */
+-static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
++static int alarm_clock_get(const clockid_t which_clock, struct timespec *tp)
+ {
+       struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+@@ -836,7 +836,7 @@ static int __init alarmtimer_init(void)
+       struct platform_device *pdev;
+       int error = 0;
+       int i;
+-      struct k_clock alarm_clock = {
++      static struct k_clock alarm_clock = {
+               .clock_getres   = alarm_clock_getres,
+               .clock_get      = alarm_clock_get,
+               .timer_create   = alarm_timer_create,
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index 9cff0ab..0e69c94 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -273,7 +273,7 @@ static void put_clock_desc(struct posix_clock_desc *cd)
+       fput(cd->fp);
+ }
+-static int pc_clock_adjtime(clockid_t id, struct timex *tx)
++static int pc_clock_adjtime(const clockid_t id, struct timex *tx)
+ {
+       struct posix_clock_desc cd;
+       int err;
+@@ -297,7 +297,7 @@ out:
+       return err;
+ }
+-static int pc_clock_gettime(clockid_t id, struct timespec *ts)
++static int pc_clock_gettime(const clockid_t id, struct timespec *ts)
+ {
+       struct posix_clock_desc cd;
+       int err;
+@@ -316,7 +316,7 @@ static int pc_clock_gettime(clockid_t id, struct timespec *ts)
+       return err;
+ }
+-static int pc_clock_getres(clockid_t id, struct timespec *ts)
++static int pc_clock_getres(const clockid_t id, struct timespec *ts)
+ {
+       struct posix_clock_desc cd;
+       int err;
+@@ -335,7 +335,7 @@ static int pc_clock_getres(clockid_t id, struct timespec *ts)
+       return err;
+ }
+-static int pc_clock_settime(clockid_t id, const struct timespec *ts)
++static int pc_clock_settime(const clockid_t id, const struct timespec *ts)
+ {
+       struct posix_clock_desc cd;
+       int err;
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 39008d7..0a60468 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -1468,14 +1468,14 @@ struct k_clock clock_posix_cpu = {
+ static __init int init_posix_cpu_timers(void)
+ {
+-      struct k_clock process = {
++      static struct k_clock process = {
+               .clock_getres   = process_cpu_clock_getres,
+               .clock_get      = process_cpu_clock_get,
+               .timer_create   = process_cpu_timer_create,
+               .nsleep         = process_cpu_nsleep,
+               .nsleep_restart = process_cpu_nsleep_restart,
+       };
+-      struct k_clock thread = {
++      static struct k_clock thread = {
+               .clock_getres   = thread_cpu_clock_getres,
+               .clock_get      = thread_cpu_clock_get,
+               .timer_create   = thread_cpu_timer_create,
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index f2826c3..7e6663a 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -43,6 +43,7 @@
+ #include <linux/hash.h>
+ #include <linux/posix-clock.h>
+ #include <linux/posix-timers.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscalls.h>
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
+  *        which we beg off on and pass to do_sys_settimeofday().
+  */
+-static struct k_clock posix_clocks[MAX_CLOCKS];
++static struct k_clock *posix_clocks[MAX_CLOCKS];
+ /*
+  * These ones are defined below.
+@@ -203,7 +204,7 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
+ }
+ /* Get clock_realtime */
+-static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
++static int posix_clock_realtime_get(const clockid_t which_clock, struct timespec *tp)
+ {
+       ktime_get_real_ts(tp);
+       return 0;
+@@ -225,7 +226,7 @@ static int posix_clock_realtime_adj(const clockid_t which_clock,
+ /*
+  * Get monotonic time for posix timers
+  */
+-static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
++static int posix_ktime_get_ts(const clockid_t which_clock, struct timespec *tp)
+ {
+       ktime_get_ts(tp);
+       return 0;
+@@ -234,20 +235,20 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
+ /*
+  * Get monotonic-raw time for posix timers
+  */
+-static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
++static int posix_get_monotonic_raw(const clockid_t which_clock, struct timespec *tp)
+ {
+       getrawmonotonic(tp);
+       return 0;
+ }
+-static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
++static int posix_get_realtime_coarse(const clockid_t which_clock, struct timespec *tp)
+ {
+       *tp = current_kernel_time();
+       return 0;
+ }
+-static int posix_get_monotonic_coarse(clockid_t which_clock,
++static int posix_get_monotonic_coarse(const clockid_t which_clock,
+                                               struct timespec *tp)
+ {
+       *tp = get_monotonic_coarse();
+@@ -266,7 +267,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
+       return 0;
+ }
+-static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
++static int posix_get_tai(const clockid_t which_clock, struct timespec *tp)
+ {
+       timekeeping_clocktai(tp);
+       return 0;
+@@ -284,7 +285,7 @@ static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec *tp)
+  */
+ static __init int init_posix_timers(void)
+ {
+-      struct k_clock clock_realtime = {
++      static struct k_clock clock_realtime = {
+               .clock_getres   = posix_get_hrtimer_res,
+               .clock_get      = posix_clock_realtime_get,
+               .clock_set      = posix_clock_realtime_set,
+@@ -296,7 +297,7 @@ static __init int init_posix_timers(void)
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+-      struct k_clock clock_monotonic = {
++      static struct k_clock clock_monotonic = {
+               .clock_getres   = posix_get_hrtimer_res,
+               .clock_get      = posix_ktime_get_ts,
+               .nsleep         = common_nsleep,
+@@ -306,19 +307,19 @@ static __init int init_posix_timers(void)
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+-      struct k_clock clock_monotonic_raw = {
++      static struct k_clock clock_monotonic_raw = {
+               .clock_getres   = posix_get_hrtimer_res,
+               .clock_get      = posix_get_monotonic_raw,
+       };
+-      struct k_clock clock_realtime_coarse = {
++      static struct k_clock clock_realtime_coarse = {
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_realtime_coarse,
+       };
+-      struct k_clock clock_monotonic_coarse = {
++      static struct k_clock clock_monotonic_coarse = {
+               .clock_getres   = posix_get_coarse_res,
+               .clock_get      = posix_get_monotonic_coarse,
+       };
+-      struct k_clock clock_tai = {
++      static struct k_clock clock_tai = {
+               .clock_getres   = posix_get_hrtimer_res,
+               .clock_get      = posix_get_tai,
+               .nsleep         = common_nsleep,
+@@ -328,7 +329,7 @@ static __init int init_posix_timers(void)
+               .timer_get      = common_timer_get,
+               .timer_del      = common_timer_del,
+       };
+-      struct k_clock clock_boottime = {
++      static struct k_clock clock_boottime = {
+               .clock_getres   = posix_get_hrtimer_res,
+               .clock_get      = posix_get_boottime,
+               .nsleep         = common_nsleep,
+@@ -540,7 +541,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
+               return;
+       }
+-      posix_clocks[clock_id] = *new_clock;
++      posix_clocks[clock_id] = new_clock;
+ }
+ EXPORT_SYMBOL_GPL(posix_timers_register_clock);
+@@ -586,9 +587,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
+               return (id & CLOCKFD_MASK) == CLOCKFD ?
+                       &clock_posix_dynamic : &clock_posix_cpu;
+-      if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
++      if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
+               return NULL;
+-      return &posix_clocks[id];
++      return posix_clocks[id];
+ }
+ static int common_timer_create(struct k_itimer *new_timer)
+@@ -606,7 +607,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+       struct k_clock *kc = clockid_to_kclock(which_clock);
+       struct k_itimer *new_timer;
+       int error, new_timer_id;
+-      sigevent_t event;
++      sigevent_t event = { };
+       int it_id_set = IT_ID_NOT_SET;
+       if (!kc)
+@@ -1021,6 +1022,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+       if (copy_from_user(&new_tp, tp, sizeof (*tp)))
+               return -EFAULT;
++      /* only the CLOCK_REALTIME clock can be set, all other clocks
++         have their clock_set fptr set to a nosettime dummy function
++         CLOCK_REALTIME has a NULL clock_set fptr which causes it to
++         call common_clock_set, which calls do_sys_settimeofday, which
++         we hook
++      */
++
+       return kc->clock_set(which_clock, &new_tp);
+ }
+diff --git a/kernel/time/time.c b/kernel/time/time.c
+index 667b933..1668952 100644
+--- a/kernel/time/time.c
++++ b/kernel/time/time.c
+@@ -177,6 +177,11 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
+               if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
+                       return -EINVAL;
++              /* we log in do_settimeofday called below, so don't log twice
++              */
++              if (!tv)
++                      gr_log_timechange();
++
+               sys_tz = *tz;
+               update_vsyscall_tz();
+               if (firsttime) {
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 37dec7e..6a6ac85 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -15,6 +15,7 @@
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/clocksource.h>
+ #include <linux/jiffies.h>
+@@ -1172,6 +1173,8 @@ int do_settimeofday64(const struct timespec64 *ts)
+       if (!timespec64_valid_strict(ts))
+               return -EINVAL;
++      gr_log_timechange();
++
+       raw_spin_lock_irqsave(&timekeeper_lock, flags);
+       write_seqcount_begin(&tk_core.seq);
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 32bf6f7..a0ba7cb 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1633,7 +1633,7 @@ static inline void __run_timers(struct timer_base *base)
+ /*
+  * This function runs timers and the timer-tq in bottom half context.
+  */
+-static void run_timer_softirq(struct softirq_action *h)
++static __latent_entropy void run_timer_softirq(void)
+ {
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index ba7d8b2..aa301b0 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -50,12 +50,16 @@ static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
+ static void print_name_offset(struct seq_file *m, void *sym)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      SEQ_printf(m, "<%p>", NULL);
++#else
+       char symname[KSYM_NAME_LEN];
+       if (lookup_symbol_name((unsigned long)sym, symname) < 0)
+               SEQ_printf(m, "<%pK>", sym);
+       else
+               SEQ_printf(m, "%s", symname);
++#endif
+ }
+ static void
+@@ -124,11 +128,14 @@ next_one:
+ static void
+ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      SEQ_printf(m, "  .base:       %p\n", NULL);
++#else
+       SEQ_printf(m, "  .base:       %pK\n", base);
++#endif
+       SEQ_printf(m, "  .index:      %d\n", base->index);
+       SEQ_printf(m, "  .resolution: %u nsecs\n", (unsigned) hrtimer_resolution);
+-
+       SEQ_printf(m,   "  .get_time:   ");
+       print_name_offset(m, base->get_time);
+       SEQ_printf(m,   "\n");
+@@ -393,7 +400,11 @@ static int __init init_timer_list_procfs(void)
+ {
+       struct proc_dir_entry *pe;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
++#else
+       pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
++#endif
+       if (!pe)
+               return -ENOMEM;
+       return 0;
+diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
+index 087204c..671b5822 100644
+--- a/kernel/time/timer_stats.c
++++ b/kernel/time/timer_stats.c
+@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
+ static unsigned long nr_entries;
+ static struct entry entries[MAX_ENTRIES];
+-static atomic_t overflow_count;
++static atomic_unchecked_t overflow_count;
+ /*
+  * The entries are in a hash-table, for fast lookup:
+@@ -140,7 +140,7 @@ static void reset_entries(void)
+       nr_entries = 0;
+       memset(entries, 0, sizeof(entries));
+       memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+-      atomic_set(&overflow_count, 0);
++      atomic_set_unchecked(&overflow_count, 0);
+ }
+ static struct entry *alloc_entry(void)
+@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+       if (likely(entry))
+               entry->count++;
+       else
+-              atomic_inc(&overflow_count);
++              atomic_inc_unchecked(&overflow_count);
+  out_unlock:
+       raw_spin_unlock_irqrestore(lock, flags);
+@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ static void print_name_offset(struct seq_file *m, unsigned long addr)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      seq_printf(m, "<%p>", NULL);
++#else
+       char symname[KSYM_NAME_LEN];
+       if (lookup_symbol_name(addr, symname) < 0)
+-              seq_printf(m, "<%p>", (void *)addr);
++              seq_printf(m, "<%pK>", (void *)addr);
+       else
+               seq_printf(m, "%s", symname);
++#endif
+ }
+ static int tstats_show(struct seq_file *m, void *v)
+@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
+       seq_puts(m, "Timer Stats Version: v0.3\n");
+       seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
+-      if (atomic_read(&overflow_count))
+-              seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
++      if (atomic_read_unchecked(&overflow_count))
++              seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
+       seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
+       for (i = 0; i < nr_entries; i++) {
+@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
+ {
+       struct proc_dir_entry *pe;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++      pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
++#else
+       pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
++#endif
+       if (!pe)
+               return -ENOMEM;
+       return 0;
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index f4b86e8..18903a2 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -107,6 +107,7 @@ config TRACING
+ config GENERIC_TRACER
+       bool
+       select TRACING
++      depends on !GRKERNSEC_KMEM
+ #
+ # Minimum requirements an architecture has to meet for us to
+@@ -120,6 +121,7 @@ config TRACING_SUPPORT
+       # irqflags tracing for your architecture.
+       depends on TRACE_IRQFLAGS_SUPPORT || PPC32
+       depends on STACKTRACE_SUPPORT
++      depends on !GRKERNSEC_KMEM
+       default y
+ if TRACING_SUPPORT
+@@ -225,6 +227,7 @@ config ENABLE_DEFAULT_TRACERS
+       bool "Trace process context switches and events"
+       depends on !GENERIC_TRACER
+       select TRACING
++      depends on !GRKERNSEC_KMEM
+       help
+         This tracer hooks to various trace points in the kernel,
+         allowing the user to pick and choose which trace point they
+@@ -378,6 +381,7 @@ config BLK_DEV_IO_TRACE
+       depends on BLOCK
+       select RELAY
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       select TRACEPOINTS
+       select GENERIC_TRACER
+       select STACKTRACE
+@@ -402,6 +406,7 @@ config KPROBE_EVENT
+       depends on HAVE_REGS_AND_STACK_ACCESS_API
+       bool "Enable kprobes-based dynamic events"
+       select TRACING
++      depends on !GRKERNSEC_KMEM
+       select PROBE_EVENTS
+       default y
+       help
+@@ -423,6 +428,7 @@ config UPROBE_EVENT
+       select UPROBES
+       select PROBE_EVENTS
+       select TRACING
++      depends on !GRKERNSEC_KMEM
+       default n
+       help
+         This allows the user to add tracing events on top of userspace
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index dbafc5d..819bd5d 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -334,7 +334,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+       struct blk_trace *bt = filp->private_data;
+       char buf[16];
+-      snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
++      snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
+       return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ }
+@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+               return 1;
+       bt = buf->chan->private_data;
+-      atomic_inc(&bt->dropped);
++      atomic_inc_unchecked(&bt->dropped);
+       return 0;
+ }
+@@ -485,7 +485,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+       bt->dir = dir;
+       bt->dev = dev;
+-      atomic_set(&bt->dropped, 0);
++      atomic_set_unchecked(&bt->dropped, 0);
+       INIT_LIST_HEAD(&bt->running_list);
+       ret = -EIO;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 84752c8..64513c9 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -120,8 +120,9 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                                struct ftrace_ops *op, struct pt_regs *regs);
+ #else
+ /* See comment below, where ftrace_ops_list_func is defined */
+-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+-#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
++static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
++                               struct ftrace_ops *op, struct pt_regs *regs);
++#define ftrace_ops_list_func (ftrace_ops_no_ops)
+ #endif
+ /*
+@@ -2480,13 +2481,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+       if (unlikely(ftrace_disabled))
+               return 0;
++      ret = ftrace_arch_code_modify_prepare();
++      FTRACE_WARN_ON(ret);
++      if (ret)
++              return 0;
++
+       ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++      FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
+       if (ret) {
+               ftrace_bug_type = FTRACE_BUG_INIT;
+               ftrace_bug(ret, rec);
+-              return 0;
+       }
+-      return 1;
++      return ret ? 0 : 1;
+ }
+ /*
+@@ -4850,8 +4856,10 @@ static int ftrace_process_locs(struct module *mod,
+       if (!count)
+               return 0;
++      pax_open_kernel();
+       sort(start, count, sizeof(*start),
+            ftrace_cmp_ips, NULL);
++      pax_close_kernel();
+       start_pg = ftrace_allocate_pages(count);
+       if (!start_pg)
+@@ -5267,7 +5275,8 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+       __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+ }
+ #else
+-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
++static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
++                               struct ftrace_ops *op, struct pt_regs *regs)
+ {
+       __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
+ }
+@@ -5690,8 +5699,12 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ }
+ /* The callbacks that hook a function */
+-trace_func_graph_ret_t ftrace_graph_return =
+-                      (trace_func_graph_ret_t)ftrace_stub;
++static void ftrace_graph_return_stub(struct ftrace_graph_ret *trace)
++{
++      ftrace_stub(0, 0, NULL, NULL);
++}
++
++trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub;
+ trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+ static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
+@@ -5724,7 +5737,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+               if (t->ret_stack == NULL) {
+                       atomic_set(&t->tracing_graph_pause, 0);
+-                      atomic_set(&t->trace_overrun, 0);
++                      atomic_set_unchecked(&t->trace_overrun, 0);
+                       t->curr_ret_stack = -1;
+                       /* Make sure the tasks see the -1 first: */
+                       smp_wmb();
+@@ -5919,7 +5932,7 @@ void unregister_ftrace_graph(void)
+               goto out;
+       ftrace_graph_active--;
+-      ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
++      ftrace_graph_return = ftrace_graph_return_stub;
+       ftrace_graph_entry = ftrace_graph_entry_stub;
+       __ftrace_graph_entry = ftrace_graph_entry_stub;
+       ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
+@@ -5947,7 +5960,7 @@ static void
+ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+ {
+       atomic_set(&t->tracing_graph_pause, 0);
+-      atomic_set(&t->trace_overrun, 0);
++      atomic_set_unchecked(&t->trace_overrun, 0);
+       t->ftrace_timestamp = 0;
+       /* make curr_ret_stack visible before we add the ret_stack */
+       smp_wmb();
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 9c14373..5ddd763 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -296,9 +296,9 @@ struct buffer_data_page {
+  */
+ struct buffer_page {
+       struct list_head list;          /* list of buffer pages */
+-      local_t          write;         /* index for next write */
++      local_unchecked_t        write;         /* index for next write */
+       unsigned         read;          /* index for next read */
+-      local_t          entries;       /* entries on this page */
++      local_unchecked_t        entries;       /* entries on this page */
+       unsigned long    real_end;      /* real end of data */
+       struct buffer_data_page *page;  /* Actual data page */
+ };
+@@ -448,11 +448,11 @@ struct ring_buffer_per_cpu {
+       unsigned long                   last_overrun;
+       local_t                         entries_bytes;
+       local_t                         entries;
+-      local_t                         overrun;
+-      local_t                         commit_overrun;
+-      local_t                         dropped_events;
++      local_unchecked_t               overrun;
++      local_unchecked_t               commit_overrun;
++      local_unchecked_t               dropped_events;
+       local_t                         committing;
+-      local_t                         commits;
++      local_unchecked_t               commits;
+       unsigned long                   read;
+       unsigned long                   read_bytes;
+       u64                             write_stamp;
+@@ -1018,8 +1018,8 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+        *
+        * We add a counter to the write field to denote this.
+        */
+-      old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+-      old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
++      old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
++      old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
+       /*
+        * Just make sure we have seen our old_write and synchronize
+@@ -1047,8 +1047,8 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+                * cmpxchg to only update if an interrupt did not already
+                * do it for us. If the cmpxchg fails, we don't care.
+                */
+-              (void)local_cmpxchg(&next_page->write, old_write, val);
+-              (void)local_cmpxchg(&next_page->entries, old_entries, eval);
++              (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
++              (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
+               /*
+                * No need to worry about races with clearing out the commit.
+@@ -1412,12 +1412,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
+ {
+-      return local_read(&bpage->entries) & RB_WRITE_MASK;
++      return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
+ }
+ static inline unsigned long rb_page_write(struct buffer_page *bpage)
+ {
+-      return local_read(&bpage->write) & RB_WRITE_MASK;
++      return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
+ }
+ static int
+@@ -1512,7 +1512,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+                        * bytes consumed in ring buffer from here.
+                        * Increment overrun to account for the lost events.
+                        */
+-                      local_add(page_entries, &cpu_buffer->overrun);
++                      local_add_unchecked(page_entries, &cpu_buffer->overrun);
+                       local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+               }
+@@ -1942,7 +1942,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+                * it is our responsibility to update
+                * the counters.
+                */
+-              local_add(entries, &cpu_buffer->overrun);
++              local_add_unchecked(entries, &cpu_buffer->overrun);
+               local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+               /*
+@@ -2079,7 +2079,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+               if (tail == BUF_PAGE_SIZE)
+                       tail_page->real_end = 0;
+-              local_sub(length, &tail_page->write);
++              local_sub_unchecked(length, &tail_page->write);
+               return;
+       }
+@@ -2114,7 +2114,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+               rb_event_set_padding(event);
+               /* Set the write back to the previous setting */
+-              local_sub(length, &tail_page->write);
++              local_sub_unchecked(length, &tail_page->write);
+               return;
+       }
+@@ -2126,7 +2126,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+       /* Set write to end of buffer */
+       length = (tail + length) - BUF_PAGE_SIZE;
+-      local_sub(length, &tail_page->write);
++      local_sub_unchecked(length, &tail_page->write);
+ }
+ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
+@@ -2154,7 +2154,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+        * about it.
+        */
+       if (unlikely(next_page == commit_page)) {
+-              local_inc(&cpu_buffer->commit_overrun);
++              local_inc_unchecked(&cpu_buffer->commit_overrun);
+               goto out_reset;
+       }
+@@ -2184,7 +2184,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+                        * this is easy, just stop here.
+                        */
+                       if (!(buffer->flags & RB_FL_OVERWRITE)) {
+-                              local_inc(&cpu_buffer->dropped_events);
++                              local_inc_unchecked(&cpu_buffer->dropped_events);
+                               goto out_reset;
+                       }
+@@ -2210,7 +2210,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+                                     cpu_buffer->tail_page) &&
+                                    (cpu_buffer->commit_page ==
+                                     cpu_buffer->reader_page))) {
+-                              local_inc(&cpu_buffer->commit_overrun);
++                              local_inc_unchecked(&cpu_buffer->commit_overrun);
+                               goto out_reset;
+                       }
+               }
+@@ -2358,7 +2358,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+       if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+               unsigned long write_mask =
+-                      local_read(&bpage->write) & ~RB_WRITE_MASK;
++                      local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
+               unsigned long event_length = rb_event_length(event);
+               /*
+                * This is on the tail page. It is possible that
+@@ -2368,7 +2368,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+                */
+               old_index += write_mask;
+               new_index += write_mask;
+-              index = local_cmpxchg(&bpage->write, old_index, new_index);
++              index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
+               if (index == old_index) {
+                       /* update counters */
+                       local_sub(event_length, &cpu_buffer->entries_bytes);
+@@ -2383,7 +2383,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+       local_inc(&cpu_buffer->committing);
+-      local_inc(&cpu_buffer->commits);
++      local_inc_unchecked(&cpu_buffer->commits);
+ }
+ static void
+@@ -2450,7 +2450,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+               return;
+  again:
+-      commits = local_read(&cpu_buffer->commits);
++      commits = local_read_unchecked(&cpu_buffer->commits);
+       /* synchronize with interrupts */
+       barrier();
+       if (local_read(&cpu_buffer->committing) == 1)
+@@ -2466,7 +2466,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+        * updating of the commit page and the clearing of the
+        * committing counter.
+        */
+-      if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
++      if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
+           !local_read(&cpu_buffer->committing)) {
+               local_inc(&cpu_buffer->committing);
+               goto again;
+@@ -2695,7 +2695,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+       /* Don't let the compiler play games with cpu_buffer->tail_page */
+       tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
+-      write = local_add_return(info->length, &tail_page->write);
++      write = local_add_return_unchecked(info->length, &tail_page->write);
+       /* set write to only the index of the write */
+       write &= RB_WRITE_MASK;
+@@ -2718,7 +2718,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+       kmemcheck_annotate_bitfield(event, bitfield);
+       rb_update_event(cpu_buffer, event, info);
+-      local_inc(&tail_page->entries);
++      local_inc_unchecked(&tail_page->entries);
+       /*
+        * If this is the first commit on the page, then update
+@@ -2755,7 +2755,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
+       barrier();
+       if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+               local_dec(&cpu_buffer->committing);
+-              local_dec(&cpu_buffer->commits);
++              local_dec_unchecked(&cpu_buffer->commits);
+               return NULL;
+       }
+ #endif
+@@ -2884,7 +2884,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+       /* Do the likely case first */
+       if (likely(bpage->page == (void *)addr)) {
+-              local_dec(&bpage->entries);
++              local_dec_unchecked(&bpage->entries);
+               return;
+       }
+@@ -2896,7 +2896,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+       start = bpage;
+       do {
+               if (bpage->page == (void *)addr) {
+-                      local_dec(&bpage->entries);
++                      local_dec_unchecked(&bpage->entries);
+                       return;
+               }
+               rb_inc_page(cpu_buffer, &bpage);
+@@ -3184,7 +3184,7 @@ static inline unsigned long
+ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+       return local_read(&cpu_buffer->entries) -
+-              (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
++              (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
+ }
+ /**
+@@ -3273,7 +3273,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+               return 0;
+       cpu_buffer = buffer->buffers[cpu];
+-      ret = local_read(&cpu_buffer->overrun);
++      ret = local_read_unchecked(&cpu_buffer->overrun);
+       return ret;
+ }
+@@ -3296,7 +3296,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+               return 0;
+       cpu_buffer = buffer->buffers[cpu];
+-      ret = local_read(&cpu_buffer->commit_overrun);
++      ret = local_read_unchecked(&cpu_buffer->commit_overrun);
+       return ret;
+ }
+@@ -3318,7 +3318,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+               return 0;
+       cpu_buffer = buffer->buffers[cpu];
+-      ret = local_read(&cpu_buffer->dropped_events);
++      ret = local_read_unchecked(&cpu_buffer->dropped_events);
+       return ret;
+ }
+@@ -3381,7 +3381,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+       /* if you care about this being correct, lock the buffer */
+       for_each_buffer_cpu(buffer, cpu) {
+               cpu_buffer = buffer->buffers[cpu];
+-              overruns += local_read(&cpu_buffer->overrun);
++              overruns += local_read_unchecked(&cpu_buffer->overrun);
+       }
+       return overruns;
+@@ -3552,8 +3552,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+       /*
+        * Reset the reader page to size zero.
+        */
+-      local_set(&cpu_buffer->reader_page->write, 0);
+-      local_set(&cpu_buffer->reader_page->entries, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->write, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
+       local_set(&cpu_buffer->reader_page->page->commit, 0);
+       cpu_buffer->reader_page->real_end = 0;
+@@ -3587,7 +3587,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+        * want to compare with the last_overrun.
+        */
+       smp_mb();
+-      overwrite = local_read(&(cpu_buffer->overrun));
++      overwrite = local_read_unchecked(&(cpu_buffer->overrun));
+       /*
+        * Here's the tricky part.
+@@ -4173,8 +4173,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+       cpu_buffer->head_page
+               = list_entry(cpu_buffer->pages, struct buffer_page, list);
+-      local_set(&cpu_buffer->head_page->write, 0);
+-      local_set(&cpu_buffer->head_page->entries, 0);
++      local_set_unchecked(&cpu_buffer->head_page->write, 0);
++      local_set_unchecked(&cpu_buffer->head_page->entries, 0);
+       local_set(&cpu_buffer->head_page->page->commit, 0);
+       cpu_buffer->head_page->read = 0;
+@@ -4184,18 +4184,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+       INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+       INIT_LIST_HEAD(&cpu_buffer->new_pages);
+-      local_set(&cpu_buffer->reader_page->write, 0);
+-      local_set(&cpu_buffer->reader_page->entries, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->write, 0);
++      local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
+       local_set(&cpu_buffer->reader_page->page->commit, 0);
+       cpu_buffer->reader_page->read = 0;
+       local_set(&cpu_buffer->entries_bytes, 0);
+-      local_set(&cpu_buffer->overrun, 0);
+-      local_set(&cpu_buffer->commit_overrun, 0);
+-      local_set(&cpu_buffer->dropped_events, 0);
++      local_set_unchecked(&cpu_buffer->overrun, 0);
++      local_set_unchecked(&cpu_buffer->commit_overrun, 0);
++      local_set_unchecked(&cpu_buffer->dropped_events, 0);
+       local_set(&cpu_buffer->entries, 0);
+       local_set(&cpu_buffer->committing, 0);
+-      local_set(&cpu_buffer->commits, 0);
++      local_set_unchecked(&cpu_buffer->commits, 0);
+       cpu_buffer->read = 0;
+       cpu_buffer->read_bytes = 0;
+@@ -4585,8 +4585,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+               rb_init_page(bpage);
+               bpage = reader->page;
+               reader->page = *data_page;
+-              local_set(&reader->write, 0);
+-              local_set(&reader->entries, 0);
++              local_set_unchecked(&reader->write, 0);
++              local_set_unchecked(&reader->entries, 0);
+               reader->read = 0;
+               *data_page = bpage;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7bc5676..90db3d8 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3883,7 +3883,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+       return 0;
+ }
+-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
++int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
+ {
+       /* do nothing if flag is already set */
+       if (!!(tr->trace_flags & mask) == !!enabled)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index f783df4..6d1062f 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1610,7 +1610,7 @@ void trace_printk_control(bool enabled);
+ void trace_printk_init_buffers(void);
+ void trace_printk_start_comm(void);
+ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
++int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
+ /*
+  * Normal trace_printk() and friends allocates special buffers
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index 0f06532..247c8e7 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -127,7 +127,7 @@ u64 notrace trace_clock_global(void)
+ }
+ EXPORT_SYMBOL_GPL(trace_clock_global);
+-static atomic64_t trace_counter;
++static atomic64_unchecked_t trace_counter;
+ /*
+  * trace_clock_counter(): simply an atomic counter.
+@@ -136,5 +136,5 @@ static atomic64_t trace_counter;
+  */
+ u64 notrace trace_clock_counter(void)
+ {
+-      return atomic64_add_return(1, &trace_counter);
++      return atomic64_inc_return_unchecked(&trace_counter);
+ }
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 03c0a48..154163e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2274,7 +2274,6 @@ __trace_early_add_new_event(struct trace_event_call *call,
+       return 0;
+ }
+-struct ftrace_module_file_ops;
+ static void __add_event_to_tracers(struct trace_event_call *call);
+ /* Add an additional event_call dynamically */
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index f3a960e..f4ce9f9 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1078,8 +1078,8 @@ static void hist_trigger_show(struct seq_file *m,
+       }
+       seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
+-                 (u64)atomic64_read(&hist_data->map->hits),
+-                 n_entries, (u64)atomic64_read(&hist_data->map->drops));
++                 (u64)atomic64_read_unchecked(&hist_data->map->hits),
++                 n_entries, (u64)atomic64_read_unchecked(&hist_data->map->drops));
+ }
+ static int hist_show(struct seq_file *m, void *v)
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 7363ccf..807cbf1 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -138,7 +138,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+-              atomic_inc(&current->trace_overrun);
++              atomic_inc_unchecked(&current->trace_overrun);
+               return -EBUSY;
+       }
+@@ -235,7 +235,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+-      trace->overrun = atomic_read(&current->trace_overrun);
++      trace->overrun = atomic_read_unchecked(&current->trace_overrun);
+       trace->depth = index;
+ }
+diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
+index cd7480d..f97e6e4 100644
+--- a/kernel/trace/trace_mmiotrace.c
++++ b/kernel/trace/trace_mmiotrace.c
+@@ -24,7 +24,7 @@ struct header_iter {
+ static struct trace_array *mmio_trace_array;
+ static bool overrun_detected;
+ static unsigned long prev_overruns;
+-static atomic_t dropped_count;
++static atomic_unchecked_t dropped_count;
+ static void mmio_reset_data(struct trace_array *tr)
+ {
+@@ -120,7 +120,7 @@ static void mmio_close(struct trace_iterator *iter)
+ static unsigned long count_overruns(struct trace_iterator *iter)
+ {
+-      unsigned long cnt = atomic_xchg(&dropped_count, 0);
++      unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
+       unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+       if (over > prev_overruns)
+@@ -303,7 +303,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
+       event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
+                                         sizeof(*entry), 0, pc);
+       if (!event) {
+-              atomic_inc(&dropped_count);
++              atomic_inc_unchecked(&dropped_count);
+               return;
+       }
+       entry   = ring_buffer_event_data(event);
+@@ -333,7 +333,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
+       event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
+                                         sizeof(*entry), 0, pc);
+       if (!event) {
+-              atomic_inc(&dropped_count);
++              atomic_inc_unchecked(&dropped_count);
+               return;
+       }
+       entry   = ring_buffer_event_data(event);
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 0bb9cf2..f319026 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -717,14 +717,16 @@ int register_trace_event(struct trace_event *event)
+                       goto out;
+       }
++      pax_open_kernel();
+       if (event->funcs->trace == NULL)
+-              event->funcs->trace = trace_nop_print;
++              const_cast(event->funcs->trace) = trace_nop_print;
+       if (event->funcs->raw == NULL)
+-              event->funcs->raw = trace_nop_print;
++              const_cast(event->funcs->raw) = trace_nop_print;
+       if (event->funcs->hex == NULL)
+-              event->funcs->hex = trace_nop_print;
++              const_cast(event->funcs->hex) = trace_nop_print;
+       if (event->funcs->binary == NULL)
+-              event->funcs->binary = trace_nop_print;
++              const_cast(event->funcs->binary) = trace_nop_print;
++      pax_close_kernel();
+       key = event->type & (EVENT_HASHSIZE - 1);
+diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
+index e694c9f..6775a38 100644
+--- a/kernel/trace/trace_seq.c
++++ b/kernel/trace/trace_seq.c
+@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
+               return 0;
+       }
+-      seq_buf_path(&s->seq, path, "\n");
++      seq_buf_path(&s->seq, path, "\n\\");
+       if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+               s->seq.len = save_len;
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index 2a1abba..2a81a78 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
+               return;
+       /* we do not handle interrupt stacks yet */
+-      if (!object_is_on_stack(stack))
++      if (!object_starts_on_stack(stack))
+               return;
+       /* Can't do this from NMI context (can cause deadlocks) */
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index b2b6efc..52e0a3d 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -605,6 +605,8 @@ static int perf_sysenter_enable(struct trace_event_call *call)
+       int num;
+       num = ((struct syscall_metadata *)call->data)->syscall_nr;
++      if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
++              return -EINVAL;
+       mutex_lock(&syscall_trace_lock);
+       if (!sys_perf_refcount_enter)
+@@ -625,6 +627,8 @@ static void perf_sysenter_disable(struct trace_event_call *call)
+       int num;
+       num = ((struct syscall_metadata *)call->data)->syscall_nr;
++      if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
++              return;
+       mutex_lock(&syscall_trace_lock);
+       sys_perf_refcount_enter--;
+@@ -677,6 +681,8 @@ static int perf_sysexit_enable(struct trace_event_call *call)
+       int num;
+       num = ((struct syscall_metadata *)call->data)->syscall_nr;
++      if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
++              return -EINVAL;
+       mutex_lock(&syscall_trace_lock);
+       if (!sys_perf_refcount_exit)
+@@ -697,6 +703,8 @@ static void perf_sysexit_disable(struct trace_event_call *call)
+       int num;
+       num = ((struct syscall_metadata *)call->data)->syscall_nr;
++      if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
++              return;
+       mutex_lock(&syscall_trace_lock);
+       sys_perf_refcount_exit--;
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index 0a689bb..e96cd14 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -349,7 +349,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
+       struct tracing_map_elt *elt = NULL;
+       int idx;
+-      idx = atomic_inc_return(&map->next_elt);
++      idx = atomic_inc_return_unchecked(&map->next_elt);
+       if (idx < map->max_elts) {
+               elt = *(TRACING_MAP_ELT(map->elts, idx));
+               if (map->ops && map->ops->elt_init)
+@@ -425,7 +425,7 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
+               if (test_key && test_key == key_hash && entry->val &&
+                   keys_match(key, entry->val->key, map->key_size)) {
+-                      atomic64_inc(&map->hits);
++                      atomic64_inc_unchecked(&map->hits);
+                       return entry->val;
+               }
+@@ -438,14 +438,14 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
+                               elt = get_free_elt(map);
+                               if (!elt) {
+-                                      atomic64_inc(&map->drops);
++                                      atomic64_inc_unchecked(&map->drops);
+                                       entry->key = 0;
+                                       break;
+                               }
+                               memcpy(elt->key, key, map->key_size);
+                               entry->val = elt;
+-                              atomic64_inc(&map->hits);
++                              atomic64_inc_unchecked(&map->hits);
+                               return entry->val;
+                       }
+@@ -557,9 +557,9 @@ void tracing_map_clear(struct tracing_map *map)
+ {
+       unsigned int i;
+-      atomic_set(&map->next_elt, -1);
+-      atomic64_set(&map->hits, 0);
+-      atomic64_set(&map->drops, 0);
++      atomic_set_unchecked(&map->next_elt, -1);
++      atomic64_set_unchecked(&map->hits, 0);
++      atomic64_set_unchecked(&map->drops, 0);
+       tracing_map_array_clear(map->map);
+@@ -641,7 +641,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits,
+       map->map_bits = map_bits;
+       map->max_elts = (1 << map_bits);
+-      atomic_set(&map->next_elt, -1);
++      atomic_set_unchecked(&map->next_elt, -1);
+       map->map_size = (1 << (map_bits + 1));
+       map->ops = ops;
+@@ -700,9 +700,10 @@ int tracing_map_init(struct tracing_map *map)
+       return err;
+ }
+-static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
+-                         const struct tracing_map_sort_entry **b)
++static int cmp_entries_dup(const void *_a, const void *_b)
+ {
++      const struct tracing_map_sort_entry **a = (const struct tracing_map_sort_entry **)_a;
++      const struct tracing_map_sort_entry **b = (const struct tracing_map_sort_entry **)_b;
+       int ret = 0;
+       if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
+@@ -711,9 +712,10 @@ static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
+       return ret;
+ }
+-static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
+-                         const struct tracing_map_sort_entry **b)
++static int cmp_entries_sum(const void *_a, const void *_b)
+ {
++      const struct tracing_map_sort_entry **a = (const struct tracing_map_sort_entry **)_a;
++      const struct tracing_map_sort_entry **b = (const struct tracing_map_sort_entry **)_b;
+       const struct tracing_map_elt *elt_a, *elt_b;
+       struct tracing_map_sort_key *sort_key;
+       struct tracing_map_field *field;
+@@ -739,9 +741,10 @@ static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
+       return ret;
+ }
+-static int cmp_entries_key(const struct tracing_map_sort_entry **a,
+-                         const struct tracing_map_sort_entry **b)
++static int cmp_entries_key(const void *_a, const void *_b)
+ {
++      const struct tracing_map_sort_entry **a = (const struct tracing_map_sort_entry **)_a;
++      const struct tracing_map_sort_entry **b = (const struct tracing_map_sort_entry **)_b;
+       const struct tracing_map_elt *elt_a, *elt_b;
+       struct tracing_map_sort_key *sort_key;
+       struct tracing_map_field *field;
+@@ -874,8 +877,7 @@ static int merge_dups(struct tracing_map_sort_entry **sort_entries,
+       if (n_entries < 2)
+               return total_dups;
+-      sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
+-           (int (*)(const void *, const void *))cmp_entries_dup, NULL);
++      sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *), cmp_entries_dup, NULL);
+       key = sort_entries[0]->key;
+       for (i = 1; i < n_entries; i++) {
+@@ -923,10 +925,8 @@ static void sort_secondary(struct tracing_map *map,
+                          struct tracing_map_sort_key *primary_key,
+                          struct tracing_map_sort_key *secondary_key)
+ {
+-      int (*primary_fn)(const struct tracing_map_sort_entry **,
+-                        const struct tracing_map_sort_entry **);
+-      int (*secondary_fn)(const struct tracing_map_sort_entry **,
+-                          const struct tracing_map_sort_entry **);
++      int (*primary_fn)(const void*, const void*);
++      int (*secondary_fn)(const void*, const void*);
+       unsigned i, start = 0, n_sub = 1;
+       if (is_key(map, primary_key->field_idx))
+@@ -958,7 +958,7 @@ static void sort_secondary(struct tracing_map *map,
+               set_sort_key(map, secondary_key);
+               sort(&entries[start], n_sub,
+                    sizeof(struct tracing_map_sort_entry *),
+-                   (int (*)(const void *, const void *))secondary_fn, NULL);
++                   secondary_fn, NULL);
+               set_sort_key(map, primary_key);
+               start = i + 1;
+@@ -995,8 +995,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
+                            unsigned int n_sort_keys,
+                            struct tracing_map_sort_entry ***sort_entries)
+ {
+-      int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
+-                            const struct tracing_map_sort_entry **);
++      int (*cmp_entries_fn)(const void*, const void*);
+       struct tracing_map_sort_entry *sort_entry, **entries;
+       int i, n_entries, ret;
+@@ -1042,8 +1041,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
+       set_sort_key(map, &sort_keys[0]);
+-      sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
+-           (int (*)(const void *, const void *))cmp_entries_fn, NULL);
++      sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *), cmp_entries_fn, NULL);
+       if (n_sort_keys > 1)
+               sort_secondary(map,
+diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
+index 618838f..3dc1b9b 100644
+--- a/kernel/trace/tracing_map.h
++++ b/kernel/trace/tracing_map.h
+@@ -181,7 +181,7 @@ struct tracing_map {
+       unsigned int                    map_bits;
+       unsigned int                    map_size;
+       unsigned int                    max_elts;
+-      atomic_t                        next_elt;
++      atomic_unchecked_t              next_elt;
+       struct tracing_map_array        *elts;
+       struct tracing_map_array        *map;
+       const struct tracing_map_ops    *ops;
+@@ -191,8 +191,8 @@ struct tracing_map {
+       int                             key_idx[TRACING_MAP_KEYS_MAX];
+       unsigned int                    n_keys;
+       struct tracing_map_sort_key     sort_key;
+-      atomic64_t                      hits;
+-      atomic64_t                      drops;
++      atomic64_unchecked_t            hits;
++      atomic64_unchecked_t            drops;
+ };
+ /**
+diff --git a/kernel/user.c b/kernel/user.c
+index b069ccb..c59fe26 100644
+--- a/kernel/user.c
++++ b/kernel/user.c
+@@ -127,8 +127,8 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
+  * IRQ state (as stored in flags) is restored and uidhash_lock released
+  * upon function exit.
+  */
++static void free_user(struct user_struct *up, unsigned long flags) __releases(&uidhash_lock);
+ static void free_user(struct user_struct *up, unsigned long flags)
+-      __releases(&uidhash_lock)
+ {
+       uid_hash_remove(up);
+       spin_unlock_irqrestore(&uidhash_lock, flags);
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index 68f5942..8576ce7 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -84,6 +84,21 @@ int create_user_ns(struct cred *new)
+           !kgid_has_mapping(parent_ns, group))
+               return -EPERM;
++#ifdef CONFIG_GRKERNSEC
++      /*
++       * This doesn't really inspire confidence:
++       * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
++       * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
++       * Increases kernel attack surface in areas developers
++       * previously cared little about ("low importance due
++       * to requiring "root" capability")
++       * To be removed when this code receives *proper* review
++       */
++      if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
++                      !capable(CAP_SETGID))
++              return -EPERM;
++#endif
++
+       ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
+       if (!ns)
+               return -ENOMEM;
+@@ -988,7 +1003,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+       if (!thread_group_empty(current))
+               return -EINVAL;
+-      if (current->fs->users != 1)
++      if (atomic_read(&current->fs->users) != 1)
+               return -EINVAL;
+       if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
+index c8eac43..4b5f08f 100644
+--- a/kernel/utsname_sysctl.c
++++ b/kernel/utsname_sysctl.c
+@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
+ static int proc_do_uts_string(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table uts_table;
++      ctl_table_no_const uts_table;
+       int r;
+       memcpy(&uts_table, table, sizeof(uts_table));
+       uts_table.data = get_uts(table, write);
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 9acb29f..6fe517c 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -680,7 +680,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
+ static void watchdog_nmi_disable(unsigned int cpu) { return; }
+ #endif /* CONFIG_HARDLOCKUP_DETECTOR */
+-static struct smp_hotplug_thread watchdog_threads = {
++static struct smp_hotplug_thread watchdog_threads __read_only = {
+       .store                  = &softlockup_watchdog,
+       .thread_should_run      = watchdog_should_run,
+       .thread_fn              = watchdog,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index ef071ca..621135c 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1922,9 +1922,8 @@ static void pool_mayday_timeout(unsigned long __pool)
+  * multiple times.  Does GFP_KERNEL allocations.  Called only from
+  * manager.
+  */
++static void maybe_create_worker(struct worker_pool *pool) __must_hold(&pool->lock);
+ static void maybe_create_worker(struct worker_pool *pool)
+-__releases(&pool->lock)
+-__acquires(&pool->lock)
+ {
+ restart:
+       spin_unlock_irq(&pool->lock);
+@@ -2014,9 +2013,8 @@ static bool manage_workers(struct worker *worker)
+  * CONTEXT:
+  * spin_lock_irq(pool->lock) which is released and regrabbed.
+  */
++static void process_one_work(struct worker *worker, struct work_struct *work) __must_hold(&pool->lock);
+ static void process_one_work(struct worker *worker, struct work_struct *work)
+-__releases(&pool->lock)
+-__acquires(&pool->lock)
+ {
+       struct pool_workqueue *pwq = get_work_pwq(work);
+       struct worker_pool *pool = worker->pool;
+@@ -4573,7 +4571,7 @@ static void rebind_workers(struct worker_pool *pool)
+               WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+               worker_flags |= WORKER_REBOUND;
+               worker_flags &= ~WORKER_UNBOUND;
+-              ACCESS_ONCE(worker->flags) = worker_flags;
++              ACCESS_ONCE_RW(worker->flags) = worker_flags;
+       }
+       spin_unlock_irq(&pool->lock);
+diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c
+index 4051339..7144fad 100644
+--- a/lib/842/842_compress.c
++++ b/lib/842/842_compress.c
+@@ -306,7 +306,7 @@ static int add_template(struct sw842_param *p, u8 c)
+       }
+       if (sw842_template_counts)
+-              atomic_inc(&template_count[t[4]]);
++              atomic_inc_unchecked(&template_count[t[4]]);
+       return 0;
+ }
+@@ -328,7 +328,7 @@ static int add_repeat_template(struct sw842_param *p, u8 r)
+               return ret;
+       if (sw842_template_counts)
+-              atomic_inc(&template_repeat_count);
++              atomic_inc_unchecked(&template_repeat_count);
+       return 0;
+ }
+@@ -355,7 +355,7 @@ static int add_short_data_template(struct sw842_param *p, u8 b)
+       }
+       if (sw842_template_counts)
+-              atomic_inc(&template_short_data_count);
++              atomic_inc_unchecked(&template_short_data_count);
+       return 0;
+ }
+@@ -368,7 +368,7 @@ static int add_zeros_template(struct sw842_param *p)
+               return ret;
+       if (sw842_template_counts)
+-              atomic_inc(&template_zeros_count);
++              atomic_inc_unchecked(&template_zeros_count);
+       return 0;
+ }
+@@ -381,7 +381,7 @@ static int add_end_template(struct sw842_param *p)
+               return ret;
+       if (sw842_template_counts)
+-              atomic_inc(&template_end_count);
++              atomic_inc_unchecked(&template_end_count);
+       return 0;
+ }
+diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
+index e7f3bff..77d1d92 100644
+--- a/lib/842/842_debugfs.h
++++ b/lib/842/842_debugfs.h
+@@ -7,7 +7,7 @@
+ static bool sw842_template_counts;
+ module_param_named(template_counts, sw842_template_counts, bool, 0444);
+-static atomic_t template_count[OPS_MAX], template_repeat_count,
++static atomic_unchecked_t template_count[OPS_MAX], template_repeat_count,
+       template_zeros_count, template_short_data_count, template_end_count;
+ static struct dentry *sw842_debugfs_root;
+@@ -28,16 +28,16 @@ static int __init sw842_debugfs_create(void)
+               char name[32];
+               snprintf(name, 32, "template_%02x", i);
+-              debugfs_create_atomic_t(name, m, sw842_debugfs_root,
++              debugfs_create_atomic_unchecked_t(name, m, sw842_debugfs_root,
+                                       &template_count[i]);
+       }
+-      debugfs_create_atomic_t("template_repeat", m, sw842_debugfs_root,
++      debugfs_create_atomic_unchecked_t("template_repeat", m, sw842_debugfs_root,
+                               &template_repeat_count);
+-      debugfs_create_atomic_t("template_zeros", m, sw842_debugfs_root,
++      debugfs_create_atomic_unchecked_t("template_zeros", m, sw842_debugfs_root,
+                               &template_zeros_count);
+-      debugfs_create_atomic_t("template_short_data", m, sw842_debugfs_root,
++      debugfs_create_atomic_unchecked_t("template_short_data", m, sw842_debugfs_root,
+                               &template_short_data_count);
+-      debugfs_create_atomic_t("template_end", m, sw842_debugfs_root,
++      debugfs_create_atomic_unchecked_t("template_end", m, sw842_debugfs_root,
+                               &template_end_count);
+       return 0;
+diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
+index 11fc39b..e5cfa58 100644
+--- a/lib/842/842_decompress.c
++++ b/lib/842/842_decompress.c
+@@ -263,7 +263,7 @@ static int do_op(struct sw842_param *p, u8 o)
+       }
+       if (sw842_template_counts)
+-              atomic_inc(&template_count[o]);
++              atomic_inc_unchecked(&template_count[o]);
+       return 0;
+ }
+@@ -331,7 +331,7 @@ int sw842_decompress(const u8 *in, unsigned int ilen,
+                       }
+                       if (sw842_template_counts)
+-                              atomic_inc(&template_repeat_count);
++                              atomic_inc_unchecked(&template_repeat_count);
+                       break;
+               case OP_ZEROS:
+@@ -343,7 +343,7 @@ int sw842_decompress(const u8 *in, unsigned int ilen,
+                       p.olen -= 8;
+                       if (sw842_template_counts)
+-                              atomic_inc(&template_zeros_count);
++                              atomic_inc_unchecked(&template_zeros_count);
+                       break;
+               case OP_SHORT_DATA:
+@@ -364,12 +364,12 @@ int sw842_decompress(const u8 *in, unsigned int ilen,
+                       }
+                       if (sw842_template_counts)
+-                              atomic_inc(&template_short_data_count);
++                              atomic_inc_unchecked(&template_short_data_count);
+                       break;
+               case OP_END:
+                       if (sw842_template_counts)
+-                              atomic_inc(&template_end_count);
++                              atomic_inc_unchecked(&template_end_count);
+                       break;
+               default: /* use template */
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index cab7405..c65d473 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -243,6 +243,7 @@ config PAGE_OWNER
+       bool "Track page owner"
+       depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       select STACKTRACE
+       select STACKDEPOT
+       select PAGE_EXTENSION
+@@ -259,6 +260,7 @@ config PAGE_OWNER
+ config DEBUG_FS
+       bool "Debug Filesystem"
+       select SRCU
++      depends on !GRKERNSEC_KMEM
+       help
+         debugfs is a virtual file system that kernel developers use to put
+         debugging files into.  Enable this option to be able to read and
+@@ -512,6 +514,7 @@ config DEBUG_KMEMLEAK
+       bool "Kernel memory leak detector"
+       depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       select STACKTRACE if STACKTRACE_SUPPORT
+       select KALLSYMS
+       select CRC32
+@@ -711,6 +714,7 @@ config KCOV
+       select DEBUG_FS
+       select GCC_PLUGINS if !COMPILE_TEST
+       select GCC_PLUGIN_SANCOV if !COMPILE_TEST
++      depends on !GRKERNSEC_KMEM
+       help
+         KCOV exposes kernel code coverage information in a form suitable
+         for coverage-guided fuzzing (randomized testing).
+@@ -1012,7 +1016,7 @@ config DEBUG_MUTEXES
+ config DEBUG_WW_MUTEX_SLOWPATH
+       bool "Wait/wound mutex debugging: Slowpath testing"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select DEBUG_LOCK_ALLOC
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+@@ -1029,7 +1033,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
+ config DEBUG_LOCK_ALLOC
+       bool "Lock debugging: detect incorrect freeing of live locks"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+       select LOCKDEP
+@@ -1043,7 +1047,7 @@ config DEBUG_LOCK_ALLOC
+ config PROVE_LOCKING
+       bool "Lock debugging: prove locking correctness"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select LOCKDEP
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+@@ -1094,7 +1098,7 @@ config LOCKDEP
+ config LOCK_STAT
+       bool "Lock usage statistics"
+-      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++      depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+       select LOCKDEP
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+@@ -1507,6 +1511,7 @@ config NOTIFIER_ERROR_INJECTION
+       tristate "Notifier error injection"
+       depends on DEBUG_KERNEL
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       help
+         This option provides the ability to inject artificial errors to
+         specified notifier chain callbacks. It is useful to test the error
+@@ -1652,6 +1657,7 @@ config FAIL_MMC_REQUEST
+ config FAIL_FUTEX
+       bool "Fault-injection capability for futexes"
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       depends on FAULT_INJECTION && FUTEX
+       help
+         Provide fault-injection capability for futexes.
+@@ -1676,6 +1682,7 @@ config LATENCYTOP
+       depends on DEBUG_KERNEL
+       depends on STACKTRACE_SUPPORT
+       depends on PROC_FS
++      depends on !GRKERNSEC_HIDESYM
+       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+       select KALLSYMS
+       select KALLSYMS_ALL
+@@ -1830,7 +1837,7 @@ endmenu # runtime tests
+ config PROVIDE_OHCI1394_DMA_INIT
+       bool "Remote debugging over FireWire early on boot"
+-      depends on PCI && X86
++      depends on PCI && X86 && !GRKERNSEC
+       help
+         If you want to debug problems which hang or crash the kernel early
+         on boot and the crashing machine has a FireWire port, you can use
+diff --git a/lib/Makefile b/lib/Makefile
+index 5dc77a8..8c18345 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -76,7 +76,7 @@ obj-$(CONFIG_BTREE) += btree.o
+ obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
+ obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
+ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
++obj-y += list_debug.o
+ obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
+ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index eca8808..23b3fd8 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -363,7 +363,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+ {
+       int c, old_c, totaldigits, ndigits, nchunks, nbits;
+       u32 chunk;
+-      const char __user __force *ubuf = (const char __user __force *)buf;
++      const char __user *ubuf = (const char __force_user *)buf;
+       bitmap_zero(maskp, nmaskbits);
+@@ -449,7 +449,7 @@ int bitmap_parse_user(const char __user *ubuf,
+ {
+       if (!access_ok(VERIFY_READ, ubuf, ulen))
+               return -EFAULT;
+-      return __bitmap_parse((const char __force *)ubuf,
++      return __bitmap_parse((const char __force_kernel *)ubuf,
+                               ulen, 1, maskp, nmaskbits);
+ }
+@@ -509,7 +509,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ {
+       unsigned a, b;
+       int c, old_c, totaldigits, ndigits;
+-      const char __user __force *ubuf = (const char __user __force *)buf;
++      const char __user *ubuf = (const char __force_user *)buf;
+       int at_start, in_range;
+       totaldigits = c = 0;
+@@ -613,7 +613,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+ {
+       if (!access_ok(VERIFY_READ, ubuf, ulen))
+               return -EFAULT;
+-      return __bitmap_parselist((const char __force *)ubuf,
++      return __bitmap_parselist((const char __force_kernel *)ubuf,
+                                       ulen, 1, maskp, nmaskbits);
+ }
+ EXPORT_SYMBOL(bitmap_parselist_user);
+diff --git a/lib/bug.c b/lib/bug.c
+index bc3656e..470f3ab 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -148,6 +148,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+               return BUG_TRAP_TYPE_NONE;
+       bug = find_bug(bugaddr);
++      if (!bug)
++              return BUG_TRAP_TYPE_NONE;
+       file = NULL;
+       line = 0;
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index a8e1260..cf8f2be 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
+       if (limit > 4)
+               return;
+-      is_on_stack = object_is_on_stack(addr);
++      is_on_stack = object_starts_on_stack(addr);
+       if (is_on_stack == onstack)
+               return;
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 0234361..41a411c 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
+       /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
+          uncompressed data.  Allocate intermediate buffer for block. */
+-      bd->dbufSize = 100000*(i-BZh0);
++      i -= BZh0;
++      bd->dbufSize = 100000 * i;
+       bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+       if (!bd->dbuf)
+diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
+index ed7a1fd..44a1a62 100644
+--- a/lib/decompress_unlzma.c
++++ b/lib/decompress_unlzma.c
+@@ -39,10 +39,10 @@
+ #define       MIN(a, b) (((a) < (b)) ? (a) : (b))
+-static long long INIT read_int(unsigned char *ptr, int size)
++static unsigned long long INIT read_int(unsigned char *ptr, int size)
+ {
+       int i;
+-      long long ret = 0;
++      unsigned long long ret = 0;
+       for (i = 0; i < size; i++)
+               ret = (ret << 8) | ptr[size-i-1];
+diff --git a/lib/div64.c b/lib/div64.c
+index 7f34525..c53be4b 100644
+--- a/lib/div64.c
++++ b/lib/div64.c
+@@ -61,7 +61,7 @@ EXPORT_SYMBOL(__div64_32);
+ #endif
+ #ifndef div_s64_rem
+-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
++s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+ {
+       u64 quotient;
+@@ -132,7 +132,7 @@ EXPORT_SYMBOL(div64_u64_rem);
+  * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
+  */
+ #ifndef div64_u64
+-u64 div64_u64(u64 dividend, u64 divisor)
++u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
+ {
+       u32 high = divisor >> 32;
+       u64 quot;
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index fcfa193..b345d59 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -984,7 +984,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
+ void dma_debug_add_bus(struct bus_type *bus)
+ {
+-      struct notifier_block *nb;
++      notifier_block_no_const *nb;
+       if (dma_debug_disabled())
+               return;
+@@ -1166,7 +1166,7 @@ static void check_unmap(struct dma_debug_entry *ref)
+ static void check_for_stack(struct device *dev, void *addr)
+ {
+-      if (object_is_on_stack(addr))
++      if (object_starts_on_stack(addr))
+               err_printk(dev, NULL, "DMA-API: device driver maps memory from "
+                               "stack [addr=%p]\n", addr);
+ }
+diff --git a/lib/inflate.c b/lib/inflate.c
+index 013a761..c28f3fc 100644
+--- a/lib/inflate.c
++++ b/lib/inflate.c
+@@ -269,7 +269,7 @@ static void free(void *where)
+               malloc_ptr = free_mem_ptr;
+ }
+ #else
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #endif
+diff --git a/lib/ioremap.c b/lib/ioremap.c
+index 86c8911..f5bfc34 100644
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -75,7 +75,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+       unsigned long next;
+       phys_addr -= addr;
+-      pmd = pmd_alloc(&init_mm, pud, addr);
++      pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+@@ -101,7 +101,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+       unsigned long next;
+       phys_addr -= addr;
+-      pud = pud_alloc(&init_mm, pgd, addr);
++      pud = pud_alloc_kernel(&init_mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+diff --git a/lib/irq_poll.c b/lib/irq_poll.c
+index 836f7db..44d9849 100644
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop)
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+-static void irq_poll_softirq(struct softirq_action *h)
++static __latent_entropy void irq_poll_softirq(void)
+ {
+       struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
+       int rearm = 0, budget = irq_poll_budget;
+diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
+index 391fd23..96e17b6 100644
+--- a/lib/is_single_threaded.c
++++ b/lib/is_single_threaded.c
+@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
+       struct task_struct *p, *t;
+       bool ret;
++      if (!mm)
++              return true;
++
+       if (atomic_read(&task->signal->live) != 1)
+               return false;
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 445dcae..cbfd25d 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -955,9 +955,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+ static DEFINE_SPINLOCK(kobj_ns_type_lock);
+-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
++static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+ {
+       enum kobj_ns_type type = ops->type;
+       int error;
+diff --git a/lib/list_debug.c b/lib/list_debug.c
+index 3859bf6..818741d6 100644
+--- a/lib/list_debug.c
++++ b/lib/list_debug.c
+@@ -11,7 +11,9 @@
+ #include <linux/bug.h>
+ #include <linux/kernel.h>
+ #include <linux/rculist.h>
++#include <linux/mm.h>
++#ifdef CONFIG_DEBUG_LIST
+ /*
+  * Insert a new entry between two known consecutive entries.
+  *
+@@ -19,21 +21,40 @@
+  * the prev/next entries already!
+  */
++static bool __list_add_debug(struct list_head *new,
++                           struct list_head *prev,
++                           struct list_head *next)
++{
++      if (unlikely(next->prev != prev)) {
++              printk(KERN_ERR "list_add corruption. next->prev should be "
++                      "prev (%p), but was %p. (next=%p).\n",
++                      prev, next->prev, next);
++              BUG();
++              return false;
++      }
++      if (unlikely(prev->next != next)) {
++              printk(KERN_ERR "list_add corruption. prev->next should be "
++                      "next (%p), but was %p. (prev=%p).\n",
++                      next, prev->next, prev);
++              BUG();
++              return false;
++      }
++      if (unlikely(new == prev || new == next)) {
++              printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
++                      new, prev, next);
++              BUG();
++              return false;
++      }
++      return true;
++}
++
+ void __list_add(struct list_head *new,
+-                            struct list_head *prev,
+-                            struct list_head *next)
++              struct list_head *prev,
++              struct list_head *next)
+ {
+-      WARN(next->prev != prev,
+-              "list_add corruption. next->prev should be "
+-              "prev (%p), but was %p. (next=%p).\n",
+-              prev, next->prev, next);
+-      WARN(prev->next != next,
+-              "list_add corruption. prev->next should be "
+-              "next (%p), but was %p. (prev=%p).\n",
+-              next, prev->next, prev);
+-      WARN(new == prev || new == next,
+-           "list_add double add: new=%p, prev=%p, next=%p.\n",
+-           new, prev, next);
++      if (!__list_add_debug(new, prev, next))
++              return;
++
+       next->prev = new;
+       new->next = next;
+       new->prev = prev;
+@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
+ }
+ EXPORT_SYMBOL(__list_add);
+-void __list_del_entry(struct list_head *entry)
++static bool __list_del_entry_debug(struct list_head *entry)
+ {
+       struct list_head *prev, *next;
+       prev = entry->prev;
+       next = entry->next;
+-      if (WARN(next == LIST_POISON1,
+-              "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+-              entry, LIST_POISON1) ||
+-          WARN(prev == LIST_POISON2,
+-              "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+-              entry, LIST_POISON2) ||
+-          WARN(prev->next != entry,
+-              "list_del corruption. prev->next should be %p, "
+-              "but was %p\n", entry, prev->next) ||
+-          WARN(next->prev != entry,
+-              "list_del corruption. next->prev should be %p, "
+-              "but was %p\n", entry, next->prev))
++      if (unlikely(next == LIST_POISON1)) {
++              printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
++                      entry, LIST_POISON1);
++              BUG();
++              return false;
++      }
++      if (unlikely(prev == LIST_POISON2)) {
++              printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
++                      entry, LIST_POISON2);
++              BUG();
++              return false;
++      }
++      if (unlikely(entry->prev->next != entry)) {
++              printk(KERN_ERR "list_del corruption. prev->next should be %p, "
++                      "but was %p\n", entry, prev->next);
++              BUG();
++              return false;
++      }
++      if (unlikely(entry->next->prev != entry)) {
++              printk(KERN_ERR "list_del corruption. next->prev should be %p, "
++                      "but was %p\n", entry, next->prev);
++              BUG();
++              return false;
++      }
++      return true;
++}
++
++void __list_del_entry(struct list_head *entry)
++{
++      if (!__list_del_entry_debug(entry))
+               return;
+-      __list_del(prev, next);
++      __list_del(entry->prev, entry->next);
+ }
+ EXPORT_SYMBOL(__list_del_entry);
+@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
+ void __list_add_rcu(struct list_head *new,
+                   struct list_head *prev, struct list_head *next)
+ {
+-      WARN(next->prev != prev,
+-              "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+-              prev, next->prev, next);
+-      WARN(prev->next != next,
+-              "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+-              next, prev->next, prev);
++      if (!__list_add_debug(new, prev, next))
++              return;
++
+       new->next = next;
+       new->prev = prev;
+       rcu_assign_pointer(list_next_rcu(prev), new);
+       next->prev = new;
+ }
+ EXPORT_SYMBOL(__list_add_rcu);
++#endif
++
++void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_add_debug(new, prev, next))
++              return;
++#endif
++
++      pax_open_kernel();
++      next->prev = new;
++      new->next = next;
++      new->prev = prev;
++      prev->next = new;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(__pax_list_add);
++
++void pax_list_del(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_del_entry_debug(entry))
++              return;
++#endif
++
++      pax_open_kernel();
++      __list_del(entry->prev, entry->next);
++      entry->next = LIST_POISON1;
++      entry->prev = LIST_POISON2;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del);
++
++void pax_list_del_init(struct list_head *entry)
++{
++      pax_open_kernel();
++      __list_del(entry->prev, entry->next);
++      INIT_LIST_HEAD(entry);
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del_init);
++
++void __pax_list_add_rcu(struct list_head *new,
++                      struct list_head *prev, struct list_head *next)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_add_debug(new, prev, next))
++              return;
++#endif
++
++      pax_open_kernel();
++      new->next = next;
++      new->prev = prev;
++      rcu_assign_pointer(list_next_rcu(prev), new);
++      next->prev = new;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(__pax_list_add_rcu);
++
++void pax_list_del_rcu(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++      if (!__list_del_entry_debug(entry))
++              return;
++#endif
++
++      pax_open_kernel();
++      __list_del(entry->prev, entry->next);
++      entry->next = LIST_POISON1;
++      entry->prev = LIST_POISON2;
++      pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del_rcu);
+diff --git a/lib/llist.c b/lib/llist.c
+index ae5872b..63a9698 100644
+--- a/lib/llist.c
++++ b/lib/llist.c
+@@ -25,6 +25,7 @@
+ #include <linux/kernel.h>
+ #include <linux/export.h>
+ #include <linux/llist.h>
++#include <linux/mm.h>
+ /**
+@@ -48,6 +49,22 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ }
+ EXPORT_SYMBOL_GPL(llist_add_batch);
++bool pax_llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
++                       struct llist_head *head)
++{
++      struct llist_node *first;
++
++      do {
++              first = ACCESS_ONCE(head->first);
++              pax_open_kernel();
++              new_last->next = first;
++              pax_close_kernel();
++      } while (cmpxchg(&head->first, first, new_first) != first);
++
++      return !first;
++}
++EXPORT_SYMBOL_GPL(pax_llist_add_batch);
++
+ /**
+  * llist_del_first - delete the first entry of lock-less list
+  * @head:     the head for your lock-less list
+diff --git a/lib/lockref.c b/lib/lockref.c
+index 5a92189..d77978d 100644
+--- a/lib/lockref.c
++++ b/lib/lockref.c
+@@ -40,13 +40,13 @@
+ void lockref_get(struct lockref *lockref)
+ {
+       CMPXCHG_LOOP(
+-              new.count++;
++              __lockref_inc(&new);
+       ,
+               return;
+       );
+       spin_lock(&lockref->lock);
+-      lockref->count++;
++      __lockref_inc(lockref);
+       spin_unlock(&lockref->lock);
+ }
+ EXPORT_SYMBOL(lockref_get);
+@@ -61,8 +61,8 @@ int lockref_get_not_zero(struct lockref *lockref)
+       int retval;
+       CMPXCHG_LOOP(
+-              new.count++;
+-              if (old.count <= 0)
++              __lockref_inc(&new);
++              if (__lockref_read(&old) <= 0)
+                       return 0;
+       ,
+               return 1;
+@@ -70,8 +70,8 @@ int lockref_get_not_zero(struct lockref *lockref)
+       spin_lock(&lockref->lock);
+       retval = 0;
+-      if (lockref->count > 0) {
+-              lockref->count++;
++      if (__lockref_read(lockref) > 0) {
++              __lockref_inc(lockref);
+               retval = 1;
+       }
+       spin_unlock(&lockref->lock);
+@@ -88,17 +88,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
+ int lockref_get_or_lock(struct lockref *lockref)
+ {
+       CMPXCHG_LOOP(
+-              new.count++;
+-              if (old.count <= 0)
++              __lockref_inc(&new);
++              if (__lockref_read(&old) <= 0)
+                       break;
+       ,
+               return 1;
+       );
+       spin_lock(&lockref->lock);
+-      if (lockref->count <= 0)
++      if (__lockref_read(lockref) <= 0)
+               return 0;
+-      lockref->count++;
++      __lockref_inc(lockref);
+       spin_unlock(&lockref->lock);
+       return 1;
+ }
+@@ -114,11 +114,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
+ int lockref_put_return(struct lockref *lockref)
+ {
+       CMPXCHG_LOOP(
+-              new.count--;
+-              if (old.count <= 0)
++              __lockref_dec(&new);
++              if (__lockref_read(&old) <= 0)
+                       return -1;
+       ,
+-              return new.count;
++              return __lockref_read(&new);
+       );
+       return -1;
+ }
+@@ -132,17 +132,17 @@ EXPORT_SYMBOL(lockref_put_return);
+ int lockref_put_or_lock(struct lockref *lockref)
+ {
+       CMPXCHG_LOOP(
+-              new.count--;
+-              if (old.count <= 1)
++              __lockref_dec(&new);
++              if (__lockref_read(&old) <= 1)
+                       break;
+       ,
+               return 1;
+       );
+       spin_lock(&lockref->lock);
+-      if (lockref->count <= 1)
++      if (__lockref_read(lockref) <= 1)
+               return 0;
+-      lockref->count--;
++      __lockref_dec(lockref);
+       spin_unlock(&lockref->lock);
+       return 1;
+ }
+@@ -155,7 +155,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
+ void lockref_mark_dead(struct lockref *lockref)
+ {
+       assert_spin_locked(&lockref->lock);
+-      lockref->count = -128;
++      __lockref_set(lockref, -128);
+ }
+ EXPORT_SYMBOL(lockref_mark_dead);
+@@ -169,8 +169,8 @@ int lockref_get_not_dead(struct lockref *lockref)
+       int retval;
+       CMPXCHG_LOOP(
+-              new.count++;
+-              if (old.count < 0)
++              __lockref_inc(&new);
++              if (__lockref_read(&old) < 0)
+                       return 0;
+       ,
+               return 1;
+@@ -178,8 +178,8 @@ int lockref_get_not_dead(struct lockref *lockref)
+       spin_lock(&lockref->lock);
+       retval = 0;
+-      if (lockref->count >= 0) {
+-              lockref->count++;
++      if (__lockref_read(lockref) >= 0) {
++              __lockref_inc(lockref);
+               retval = 1;
+       }
+       spin_unlock(&lockref->lock);
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index fce1e9a..d44559b 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
+ {
+       int minlen = min_t(int, count, nla_len(src));
++      BUG_ON(minlen < 0);
++
+       memcpy(dest, nla_data(src), minlen);
+       if (count > minlen)
+               memset(dest + minlen, 0, count - minlen);
+diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
+index 27fe749..2c0e855 100644
+--- a/lib/percpu-refcount.c
++++ b/lib/percpu-refcount.c
+@@ -31,7 +31,7 @@
+  * atomic_long_t can't hit 0 before we've added up all the percpu refs.
+  */
+-#define PERCPU_COUNT_BIAS     (1LU << (BITS_PER_LONG - 1))
++#define PERCPU_COUNT_BIAS     (1LU << (BITS_PER_LONG - 2))
+ static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 8e6d552..3b33b84 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -67,7 +67,7 @@ struct radix_tree_preload {
+       /* nodes->private_data points to next preallocated node */
+       struct radix_tree_node *nodes;
+ };
+-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+ static inline void *node_to_entry(void *ptr)
+ {
+diff --git a/lib/random32.c b/lib/random32.c
+index 69ed593..a309235 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
+ }
+ #endif
+-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
++static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+ /**
+  *    prandom_u32_state - seeded pseudo-random number generator.
+diff --git a/lib/rbtree.c b/lib/rbtree.c
+index eb8a19f..3cb9b61 100644
+--- a/lib/rbtree.c
++++ b/lib/rbtree.c
+@@ -412,7 +412,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+ static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+ static const struct rb_augment_callbacks dummy_callbacks = {
+-      dummy_propagate, dummy_copy, dummy_rotate
++      .propagate = dummy_propagate,
++      .copy = dummy_copy,
++      .rotate = dummy_rotate
+ };
+ void rb_insert_color(struct rb_node *node, struct rb_root *root)
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index 56054e5..dd1cdc4 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -563,8 +563,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+  * will rewind back to the beginning and you may use it immediately
+  * by calling rhashtable_walk_next.
+  */
++int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+ int rhashtable_walk_start(struct rhashtable_iter *iter)
+-      __acquires(RCU)
+ {
+       struct rhashtable *ht = iter->ht;
+@@ -648,8 +648,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next);
+  *
+  * Finish a hash table walk.
+  */
++void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+ void rhashtable_walk_stop(struct rhashtable_iter *iter)
+-      __releases(RCU)
+ {
+       struct rhashtable *ht;
+       struct bucket_table *tbl = iter->walker->tbl;
+diff --git a/lib/seq_buf.c b/lib/seq_buf.c
+index cb18469..20ac511 100644
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -259,7 +259,7 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
+  */
+ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
+ {
+-      char *buf;
++      unsigned char *buf;
+       size_t size = seq_buf_get_buf(s, &buf);
+       int res = -1;
+@@ -268,7 +268,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
+       if (size) {
+               char *p = d_path(path, buf, size);
+               if (!IS_ERR(p)) {
+-                      char *end = mangle_path(buf, p, esc);
++                      unsigned char *end = mangle_path(buf, p, esc);
+                       if (end)
+                               res = end - buf;
+               }
+diff --git a/lib/show_mem.c b/lib/show_mem.c
+index 1feed6a..4ede1e9 100644
+--- a/lib/show_mem.c
++++ b/lib/show_mem.c
+@@ -47,6 +47,6 @@ void show_mem(unsigned int filter)
+               quicklist_total_size());
+ #endif
+ #ifdef CONFIG_MEMORY_FAILURE
+-      printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
++      printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
+ #endif
+ }
+diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
+index 9c5fe81..00657ec 100644
+--- a/lib/strncpy_from_user.c
++++ b/lib/strncpy_from_user.c
+@@ -23,7 +23,7 @@
+  */
+ static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+ {
+-      const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
++      static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       long res = 0;
+       /*
+diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
+index 8e105ed..eefbbf9 100644
+--- a/lib/strnlen_user.c
++++ b/lib/strnlen_user.c
+@@ -26,7 +26,7 @@
+  */
+ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+ {
+-      const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
++      static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       long align, res = 0;
+       unsigned long c;
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 0967771..2871684 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -16,6 +16,9 @@
+  * - scnprintf and vscnprintf
+  */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <stdarg.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+@@ -118,7 +121,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
+ }
+ EXPORT_SYMBOL(simple_strtoll);
+-static noinline_for_stack
++static noinline_for_stack __nocapture(1) __unverified_nocapture(1)
+ int skip_atoi(const char **s)
+ {
+       int i = 0;
+@@ -680,7 +683,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
+ #ifdef CONFIG_KALLSYMS
+       if (*fmt == 'B')
+               sprint_backtrace(sym, value);
+-      else if (*fmt != 'f' && *fmt != 's')
++      else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
+               sprint_symbol(sym, value);
+       else
+               sprint_symbol_no_offset(sym, value);
+@@ -1470,7 +1473,11 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
+       return format_flags(buf, end, flags, names);
+ }
+-int kptr_restrict __read_mostly;
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++int kptr_restrict __read_only = 1;
++#else
++int kptr_restrict __read_only;
++#endif
+ /*
+  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
+@@ -1481,8 +1488,10 @@ int kptr_restrict __read_mostly;
+  *
+  * - 'F' For symbolic function descriptor pointers with offset
+  * - 'f' For simple symbolic function names without offset
++ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
+  * - 'S' For symbolic direct pointers with offset
+  * - 's' For symbolic direct pointers without offset
++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
+  * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
+  * - 'B' For backtraced symbolic direct pointers with offset
+  * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+@@ -1570,7 +1579,7 @@ int kptr_restrict __read_mostly;
+  * function pointers are really function descriptors, which contain a
+  * pointer to the real address.
+  */
+-static noinline_for_stack
++static noinline_for_stack __nocapture(1) __unverified_nocapture(1)
+ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+             struct printf_spec spec)
+ {
+@@ -1578,12 +1587,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+       if (!ptr && *fmt != 'K') {
+               /*
+-               * Print (null) with the same width as a pointer so it makes
++               * Print (nil) with the same width as a pointer so it makes
+                * tabular output look nice.
+                */
+               if (spec.field_width == -1)
+                       spec.field_width = default_width;
+-              return string(buf, end, "(null)", spec);
++              return string(buf, end, "(nil)", spec);
+       }
+       switch (*fmt) {
+@@ -1593,6 +1602,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+               /* Fallthrough */
+       case 'S':
+       case 's':
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              break;
++#else
++              return symbol_string(buf, end, ptr, spec, fmt);
++#endif
++      case 'X':
++              ptr = dereference_function_descriptor(ptr);
++      case 'A':
+       case 'B':
+               return symbol_string(buf, end, ptr, spec, fmt);
+       case 'R':
+@@ -1657,6 +1674,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+                       va_end(va);
+                       return buf;
+               }
++      case 'P':
++              break;
+       case 'K':
+               switch (kptr_restrict) {
+               case 0:
+@@ -1686,6 +1705,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+                        */
+                       cred = current_cred();
+                       if (!has_capability_noaudit(current, CAP_SYSLOG) ||
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                          !has_capability_noaudit(current, CAP_SYS_ADMIN) ||
++#endif
+                           !uid_eq(cred->euid, cred->uid) ||
+                           !gid_eq(cred->egid, cred->gid))
+                               ptr = NULL;
+@@ -1719,6 +1741,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+       case 'G':
+               return flags_string(buf, end, ptr, fmt);
+       }
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      /* 'P' = approved pointers to copy to userland,
++         as in the /proc/kallsyms case, as we make it display nothing
++         for non-root users, and the real contents for root users
++         'X' = approved simple symbols
++         Also ignore 'K' pointers, since we force their NULLing for non-root users
++         above
++      */
++      if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
++              printk(KERN_ALERT "grsec: kernel infoleak detected!  Please report this log to spender@grsecurity.net.\n");
++              dump_stack();
++              ptr = NULL;
++      }
++#endif
++
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = default_width;
+@@ -1749,7 +1787,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+  * @precision: precision of a number
+  * @qualifier: qualifier of a number (long, size_t, ...)
+  */
+-static noinline_for_stack
++static noinline_for_stack __nocapture(1)
+ int format_decode(const char *fmt, struct printf_spec *spec)
+ {
+       const char *start = fmt;
+@@ -2419,11 +2457,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+       typeof(type) value;                                             \
+       if (sizeof(type) == 8) {                                        \
+               args = PTR_ALIGN(args, sizeof(u32));                    \
+-              *(u32 *)&value = *(u32 *)args;                          \
+-              *((u32 *)&value + 1) = *(u32 *)(args + 4);              \
++              *(u32 *)&value = *(const u32 *)args;                    \
++              *((u32 *)&value + 1) = *(const u32 *)(args + 4);        \
+       } else {                                                        \
+               args = PTR_ALIGN(args, sizeof(type));                   \
+-              value = *(typeof(type) *)args;                          \
++              value = *(const typeof(type) *)args;                    \
+       }                                                               \
+       args += sizeof(type);                                           \
+       value;                                                          \
+@@ -2486,7 +2524,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+               case FORMAT_TYPE_STR: {
+                       const char *str_arg = args;
+                       args += strlen(str_arg) + 1;
+-                      str = string(str, end, (char *)str_arg, spec);
++                      str = string(str, end, str_arg, spec);
+                       break;
+               }
+diff --git a/localversion-grsec b/localversion-grsec
+new file mode 100644
+index 0000000..7cd6065
+--- /dev/null
++++ b/localversion-grsec
+@@ -0,0 +1 @@
++-grsec
+diff --git a/mm/Kconfig b/mm/Kconfig
+index be0ee11..8e98a95 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -342,10 +342,11 @@ config KSM
+         root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+ config DEFAULT_MMAP_MIN_ADDR
+-        int "Low address space to protect from user allocation"
++      int "Low address space to protect from user allocation"
+       depends on MMU
+-        default 4096
+-        help
++      default 32768 if ALPHA || ARM || PARISC || SPARC32
++      default 65536
++      help
+         This is the portion of low virtual memory which should be protected
+         from userspace allocation.  Keeping a user from writing to low pages
+         can help reduce the impact of kernel NULL pointer bugs.
+@@ -377,8 +378,9 @@ config MEMORY_FAILURE
+ config HWPOISON_INJECT
+       tristate "HWPoison pages injector"
+-      depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
++      depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
+       select PROC_PAGE_MONITOR
++      depends on !GRKERNSEC
+ config NOMMU_INITIAL_TRIM_EXCESS
+       int "Turn on mmap() excess space trimming before booting"
+@@ -548,6 +550,7 @@ config MEM_SOFT_DIRTY
+       bool "Track memory changes"
+       depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
+       select PROC_PAGE_MONITOR
++      depends on !GRKERNSEC
+       help
+         This option enables memory changes tracking by introducing a
+         soft-dirty bit on pte-s. This bit it set when someone writes
+@@ -632,6 +635,7 @@ config ZSMALLOC_STAT
+       bool "Export zsmalloc statistics"
+       depends on ZSMALLOC
+       select DEBUG_FS
++      depends on !GRKERNSEC_KMEM
+       help
+         This option enables code in the zsmalloc to collect various
+         statistics about whats happening in zsmalloc and exports that
+diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
+index 22f4cd9..ed3f097 100644
+--- a/mm/Kconfig.debug
++++ b/mm/Kconfig.debug
+@@ -10,6 +10,7 @@ config PAGE_EXTENSION
+ config DEBUG_PAGEALLOC
+       bool "Debug page memory allocations"
+       depends on DEBUG_KERNEL
++      depends on !PAX_MEMORY_SANITIZE
+       depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
+       depends on !KMEMCHECK
+       select PAGE_EXTENSION
+@@ -76,8 +77,6 @@ config PAGE_POISONING_ZERO
+          no longer necessary to write zeros when GFP_ZERO is used on
+          allocation.
+-         Enabling page poisoning with this option will disable hibernation
+-
+          If unsure, say N
+       bool
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 8fde443..a8cc381 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -12,7 +12,7 @@
+ #include <linux/device.h>
+ #include <trace/events/writeback.h>
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+ struct backing_dev_info noop_backing_dev_info = {
+       .name           = "noop",
+@@ -898,7 +898,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
+               return err;
+       err = bdi_register(bdi, NULL, "%.28s-%ld", name,
+-                         atomic_long_inc_return(&bdi_seq));
++                         atomic_long_inc_return_unchecked(&bdi_seq));
+       if (err) {
+               bdi_destroy(bdi);
+               return err;
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 6c707bf..c8d0529 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -165,7 +165,7 @@ out:
+ #ifdef __ARCH_WANT_SYS_FADVISE64
+-SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
++SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, loff_t, len, int, advice)
+ {
+       return sys_fadvise64_64(fd, offset, len, advice);
+ }
+diff --git a/mm/filemap.c b/mm/filemap.c
+index ced9ef6..e042a5b 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2334,7 +2334,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+       struct address_space *mapping = file->f_mapping;
+       if (!mapping->a_ops->readpage)
+-              return -ENOEXEC;
++              return -ENODEV;
+       file_accessed(file);
+       vma->vm_ops = &generic_file_vm_ops;
+       return 0;
+@@ -2377,7 +2377,7 @@ static struct page *wait_on_page_read(struct page *page)
+ static struct page *do_read_cache_page(struct address_space *mapping,
+                               pgoff_t index,
+-                              int (*filler)(void *, struct page *),
++                              filler_t *filler,
+                               void *data,
+                               gfp_t gfp)
+ {
+@@ -2484,7 +2484,7 @@ out:
+  */
+ struct page *read_cache_page(struct address_space *mapping,
+                               pgoff_t index,
+-                              int (*filler)(void *, struct page *),
++                              filler_t *filler,
+                               void *data)
+ {
+       return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+@@ -2506,7 +2506,7 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
+                               pgoff_t index,
+                               gfp_t gfp)
+ {
+-      filler_t *filler = (filler_t *)mapping->a_ops->readpage;
++      filler_t *filler = mapping->a_ops->readpage;
+       return do_read_cache_page(mapping, index, filler, NULL, gfp);
+ }
+@@ -2536,6 +2536,7 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
+       pos = iocb->ki_pos;
+       if (limit != RLIM_INFINITY) {
++              gr_learn_resource(current, RLIMIT_FSIZE, iocb->ki_pos, 0);
+               if (iocb->ki_pos >= limit) {
+                       send_sig(SIGXFSZ, current, 0);
+                       return -EFBIG;
+diff --git a/mm/gup.c b/mm/gup.c
+index 22cc22e..361d456 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -370,11 +370,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
+       /* mlock all present pages, but do not fault in new pages */
+       if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
+               return -ENOENT;
+-      /* For mm_populate(), just skip the stack guard page. */
+-      if ((*flags & FOLL_POPULATE) &&
+-                      (stack_guard_page_start(vma, address) ||
+-                       stack_guard_page_end(vma, address + PAGE_SIZE)))
+-              return -ENOENT;
+       if (*flags & FOLL_WRITE)
+               fault_flags |= FAULT_FLAG_WRITE;
+       if (*flags & FOLL_REMOTE)
+@@ -548,14 +543,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+       if (!(gup_flags & FOLL_FORCE))
+               gup_flags |= FOLL_NUMA;
+-      do {
++      while (nr_pages) {
+               struct page *page;
+               unsigned int foll_flags = gup_flags;
+               unsigned int page_increm;
+               /* first iteration or cross vma bound */
+               if (!vma || start >= vma->vm_end) {
+-                      vma = find_extend_vma(mm, start);
++                      vma = find_vma(mm, start);
+                       if (!vma && in_gate_area(mm, start)) {
+                               int ret;
+                               ret = get_gate_page(mm, start & PAGE_MASK,
+@@ -567,7 +562,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                               goto next_page;
+                       }
+-                      if (!vma || check_vma_flags(vma, gup_flags))
++                      if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
+                               return i ? : -EFAULT;
+                       if (is_vm_hugetlb_page(vma)) {
+                               i = follow_hugetlb_page(mm, vma, pages, vmas,
+@@ -628,7 +623,7 @@ next_page:
+               i += page_increm;
+               start += page_increm * PAGE_SIZE;
+               nr_pages -= page_increm;
+-      } while (nr_pages);
++      }
+       return i;
+ }
+ EXPORT_SYMBOL(__get_user_pages);
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 50b4ca6..cf64608 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -191,8 +191,9 @@ static void flush_all_zero_pkmaps(void)
+                * So no dangers, even with speculative execution.
+                */
+               page = pte_page(pkmap_page_table[i]);
++              pax_open_kernel();
+               pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
+-
++              pax_close_kernel();
+               set_page_address(page, NULL);
+               need_flush = 1;
+       }
+@@ -255,8 +256,11 @@ start:
+               }
+       }
+       vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++      pax_open_kernel();
+       set_pte_at(&init_mm, vaddr,
+                  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
++      pax_close_kernel();
+       pkmap_count[last_pkmap_nr] = 1;
+       set_page_address(page, (void *)vaddr);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 770d83e..7cd013a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -38,7 +38,72 @@ int hugepages_treat_as_movable;
+ int hugetlb_max_hstate __read_mostly;
+ unsigned int default_hstate_idx;
+-struct hstate hstates[HUGE_MAX_HSTATE];
++
++#ifdef CONFIG_CGROUP_HUGETLB
++static struct cftype hugetlb_files[HUGE_MAX_HSTATE][5] = {
++# define MEMFILE_PRIVATE(x, val)      (((x) << 16) | (val))
++# define CFTYPE_INIT(idx) \
++      { /* Add the limit file */                                      \
++        [0] = { .private = MEMFILE_PRIVATE(idx, RES_LIMIT),           \
++                .read_u64 = hugetlb_cgroup_read_u64,                  \
++                .write = hugetlb_cgroup_write, },                     \
++        /* Add the usage file */                                      \
++        [1] = { .private = MEMFILE_PRIVATE(idx, RES_USAGE),           \
++                .read_u64 = hugetlb_cgroup_read_u64, },               \
++        /* Add the MAX usage file */                                  \
++        [2] = { .private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE),       \
++                .write = hugetlb_cgroup_reset,                        \
++                .read_u64 = hugetlb_cgroup_read_u64, },               \
++        /* Add the failcntfile */                                     \
++        [3] = { .private = MEMFILE_PRIVATE(idx, RES_FAILCNT),         \
++                .write = hugetlb_cgroup_reset,                        \
++                .read_u64 = hugetlb_cgroup_read_u64, },               \
++        [4] = { /* NULL terminator */ },                              \
++      }
++
++# if HUGE_MAX_HSTATE > 0
++      [0] = CFTYPE_INIT(0),
++# endif
++# if HUGE_MAX_HSTATE > 1
++      [1] = CFTYPE_INIT(1),
++# endif
++# if HUGE_MAX_HSTATE > 2
++      [2] = CFTYPE_INIT(2),
++# endif
++# if HUGE_MAX_HSTATE > 3
++      [3] = CFTYPE_INIT(3),
++# endif
++# if HUGE_MAX_HSTATE > 4
++#  error PaX: add more initializers...
++# endif
++
++# undef CFTYPE_INIT
++};
++#endif
++
++struct hstate hstates[HUGE_MAX_HSTATE] = {
++#ifdef CONFIG_CGROUP_HUGETLB
++# define HSTATE_INIT(idx) [idx] = { .cgroup_files = &hugetlb_files[idx] }
++
++# if HUGE_MAX_HSTATE > 0
++      HSTATE_INIT(0),
++# endif
++# if HUGE_MAX_HSTATE > 1
++      HSTATE_INIT(1),
++# endif
++# if HUGE_MAX_HSTATE > 2
++      HSTATE_INIT(2),
++# endif
++# if HUGE_MAX_HSTATE > 3
++      HSTATE_INIT(3),
++# endif
++# if HUGE_MAX_HSTATE > 4
++#  error PaX: add more initializers...
++# endif
++
++# undef HSTATE_INIT
++#endif
++};
+ /*
+  * Minimum page order among possible hugepage sizes, set to a proper value
+  * at boot time.
+@@ -2830,6 +2895,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+                        struct ctl_table *table, int write,
+                        void __user *buffer, size_t *length, loff_t *ppos)
+ {
++      ctl_table_no_const t;
+       struct hstate *h = &default_hstate;
+       unsigned long tmp = h->max_huge_pages;
+       int ret;
+@@ -2837,9 +2903,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+       if (!hugepages_supported())
+               return -EOPNOTSUPP;
+-      table->data = &tmp;
+-      table->maxlen = sizeof(unsigned long);
+-      ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
++      t = *table;
++      t.data = &tmp;
++      t.maxlen = sizeof(unsigned long);
++      ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
+       if (ret)
+               goto out;
+@@ -2874,6 +2941,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+       struct hstate *h = &default_hstate;
+       unsigned long tmp;
+       int ret;
++      ctl_table_no_const hugetlb_table;
+       if (!hugepages_supported())
+               return -EOPNOTSUPP;
+@@ -2883,9 +2951,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+       if (write && hstate_is_gigantic(h))
+               return -EINVAL;
+-      table->data = &tmp;
+-      table->maxlen = sizeof(unsigned long);
+-      ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
++      hugetlb_table = *table;
++      hugetlb_table.data = &tmp;
++      hugetlb_table.maxlen = sizeof(unsigned long);
++      ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
+@@ -3379,6 +3448,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+       i_mmap_unlock_write(mapping);
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      struct vm_area_struct *vma_m;
++      unsigned long address_m;
++      pte_t *ptep_m;
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
++      get_page(page_m);
++      hugepage_add_anon_rmap(page_m, vma_m, address_m);
++      set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
++}
++#endif
++
+ /*
+  * Hugetlb_cow() should be called with page lock of the original hugepage held.
+  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
+@@ -3492,6 +3582,11 @@ retry_avoidcopy:
+                               make_huge_pte(vma, new_page, 1));
+               page_remove_rmap(old_page, true);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              pax_mirror_huge_pte(vma, address, new_page);
++#endif
++
+               /* Make the old page be freed below */
+               new_page = old_page;
+       }
+@@ -3665,6 +3760,10 @@ retry:
+                               && (vma->vm_flags & VM_SHARED)));
+       set_huge_pte_at(mm, address, ptep, new_pte);
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_huge_pte(vma, address, page);
++#endif
++
+       hugetlb_count_add(pages_per_huge_page(h), mm);
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+               /* Optimization, do the COW without a second fault */
+@@ -3733,6 +3832,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+       struct address_space *mapping;
+       int need_wait_lock = 0;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       address &= huge_page_mask(h);
+       ptep = huge_pte_offset(mm, address);
+@@ -3750,6 +3853,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                       return VM_FAULT_OOM;
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m) {
++              unsigned long address_m;
++
++              if (vma->vm_start > vma_m->vm_start) {
++                      address_m = address;
++                      address -= SEGMEXEC_TASK_SIZE;
++                      vma = vma_m;
++                      h = hstate_vma(vma);
++              } else
++                      address_m = address + SEGMEXEC_TASK_SIZE;
++
++              if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
++                      return VM_FAULT_OOM;
++              address_m &= HPAGE_MASK;
++              unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
++      }
++#endif
++
+       mapping = vma->vm_file->f_mapping;
+       idx = vma_hugecache_offset(h, vma, address);
+diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
+index eec1150..af03e3e 100644
+--- a/mm/hugetlb_cgroup.c
++++ b/mm/hugetlb_cgroup.c
+@@ -27,7 +27,6 @@ struct hugetlb_cgroup {
+       struct page_counter hugepage[HUGE_MAX_HSTATE];
+ };
+-#define MEMFILE_PRIVATE(x, val)       (((x) << 16) | (val))
+ #define MEMFILE_IDX(val)      (((val) >> 16) & 0xffff)
+ #define MEMFILE_ATTR(val)     ((val) & 0xffff)
+@@ -254,14 +253,7 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+       return;
+ }
+-enum {
+-      RES_USAGE,
+-      RES_LIMIT,
+-      RES_MAX_USAGE,
+-      RES_FAILCNT,
+-};
+-
+-static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
++u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
+                                  struct cftype *cft)
+ {
+       struct page_counter *counter;
+@@ -285,7 +277,7 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
+ static DEFINE_MUTEX(hugetlb_limit_mutex);
+-static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
++ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
+                                   char *buf, size_t nbytes, loff_t off)
+ {
+       int ret, idx;
+@@ -316,7 +308,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
+       return ret ?: nbytes;
+ }
+-static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
++ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
+                                   char *buf, size_t nbytes, loff_t off)
+ {
+       int ret = 0;
+@@ -352,46 +344,26 @@ static char *mem_fmt(char *buf, int size, unsigned long hsize)
+ static void __init __hugetlb_cgroup_file_init(int idx)
+ {
++      char names[4][MAX_CFTYPE_NAME];
+       char buf[32];
+-      struct cftype *cft;
+       struct hstate *h = &hstates[idx];
+       /* format the size */
+       mem_fmt(buf, 32, huge_page_size(h));
+-
+-      /* Add the limit file */
+-      cft = &h->cgroup_files[0];
+-      snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
+-      cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
+-      cft->read_u64 = hugetlb_cgroup_read_u64;
+-      cft->write = hugetlb_cgroup_write;
+-
+-      /* Add the usage file */
+-      cft = &h->cgroup_files[1];
+-      snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
+-      cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
+-      cft->read_u64 = hugetlb_cgroup_read_u64;
+-
+-      /* Add the MAX usage file */
+-      cft = &h->cgroup_files[2];
+-      snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
+-      cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
+-      cft->write = hugetlb_cgroup_reset;
+-      cft->read_u64 = hugetlb_cgroup_read_u64;
+-
+-      /* Add the failcntfile */
+-      cft = &h->cgroup_files[3];
+-      snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
+-      cft->private  = MEMFILE_PRIVATE(idx, RES_FAILCNT);
+-      cft->write = hugetlb_cgroup_reset;
+-      cft->read_u64 = hugetlb_cgroup_read_u64;
+-
+-      /* NULL terminate the last cft */
+-      cft = &h->cgroup_files[4];
+-      memset(cft, 0, sizeof(*cft));
++      snprintf(names[0], MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
++      snprintf(names[1], MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
++      snprintf(names[2], MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
++      snprintf(names[3], MAX_CFTYPE_NAME, "%s.failcnt", buf);
++
++      pax_open_kernel();
++      strncpy((void *)(*h->cgroup_files)[0].name, names[0], MAX_CFTYPE_NAME);
++      strncpy((void *)(*h->cgroup_files)[1].name, names[1], MAX_CFTYPE_NAME);
++      strncpy((void *)(*h->cgroup_files)[2].name, names[2], MAX_CFTYPE_NAME);
++      strncpy((void *)(*h->cgroup_files)[3].name, names[3], MAX_CFTYPE_NAME);
++      pax_close_kernel();
+       WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
+-                                        h->cgroup_files));
++                                        *h->cgroup_files));
+ }
+ void __init hugetlb_cgroup_file_init(void)
+diff --git a/mm/internal.h b/mm/internal.h
+index 1501304..e026d61 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -151,6 +151,7 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
+ extern int __isolate_free_page(struct page *page, unsigned int order);
+ extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
+                                       unsigned int order);
++extern void free_compound_page(struct page *page);
+ extern void prep_compound_page(struct page *page, unsigned int order);
+ extern void post_alloc_hook(struct page *page, unsigned int order,
+                                       gfp_t gfp_flags);
+@@ -251,7 +252,7 @@ static inline bool is_exec_mapping(vm_flags_t flags)
+  */
+ static inline bool is_stack_mapping(vm_flags_t flags)
+ {
+-      return (flags & VM_STACK) == VM_STACK;
++      return flags & (VM_GROWSUP | VM_GROWSDOWN);
+ }
+ /*
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 086292f..702caa3 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -367,7 +367,7 @@ static void print_unreferenced(struct seq_file *seq,
+       for (i = 0; i < object->trace_len; i++) {
+               void *ptr = (void *)object->trace[i];
+-              seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
++              seq_printf(seq, "    [<%pP>] %pA\n", ptr, ptr);
+       }
+ }
+@@ -1959,7 +1959,7 @@ static int __init kmemleak_late_init(void)
+               return -ENOMEM;
+       }
+-      dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
++      dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
+                                    &kmemleak_fops);
+       if (!dentry)
+               pr_warn("Failed to create the debugfs kmemleak file\n");
+diff --git a/mm/maccess.c b/mm/maccess.c
+index 78f9274..5d8c2e02 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -28,12 +28,12 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
+       long ret;
+       mm_segment_t old_fs = get_fs();
+-      set_fs(KERNEL_DS);
+       pagefault_disable();
++      set_fs(KERNEL_DS);
+       ret = __copy_from_user_inatomic(dst,
+-                      (__force const void __user *)src, size);
+-      pagefault_enable();
++                      (const void __force_user *)src, size);
+       set_fs(old_fs);
++      pagefault_enable();
+       return ret ? -EFAULT : 0;
+ }
+@@ -56,11 +56,11 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
+       long ret;
+       mm_segment_t old_fs = get_fs();
+-      set_fs(KERNEL_DS);
+       pagefault_disable();
+-      ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+-      pagefault_enable();
++      set_fs(KERNEL_DS);
++      ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
+       set_fs(old_fs);
++      pagefault_enable();
+       return ret ? -EFAULT : 0;
+ }
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 93fb63e..0aa6448 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -56,6 +56,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
+       pgoff_t pgoff;
+       unsigned long new_flags = vma->vm_flags;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       switch (behavior) {
+       case MADV_NORMAL:
+               new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+@@ -132,6 +136,13 @@ success:
+       /*
+        * vm_flags is protected by the mmap_sem held in write mode.
+        */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m)
++              vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++#endif
++
+       vma->vm_flags = new_flags;
+ out:
+@@ -471,11 +482,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
+                            struct vm_area_struct **prev,
+                            unsigned long start, unsigned long end)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       *prev = vma;
+       if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+               return -EINVAL;
+       zap_page_range(vma, start, end - start, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m) {
++              if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
++                      return -EINVAL;
++
++              zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
++      }
++#endif
++
+       return 0;
+ }
+@@ -702,6 +729,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+       if (end < start)
+               return error;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (end > SEGMEXEC_TASK_SIZE)
++                      return error;
++      } else
++#endif
++
++      if (end > TASK_SIZE)
++              return error;
++
+       error = 0;
+       if (end == start)
+               return error;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4be518d..450a2ae 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -702,7 +702,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
+                       mem_cgroup_update_tree(memcg, page);
+ #if MAX_NUMNODES > 1
+               if (unlikely(do_numainfo))
+-                      atomic_inc(&memcg->numainfo_events);
++                      atomic64_inc(&memcg->numainfo_events);
+ #endif
+       }
+ }
+@@ -1318,7 +1318,7 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
+        * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
+        * pagein/pageout changes since the last update.
+        */
+-      if (!atomic_read(&memcg->numainfo_events))
++      if (!atomic64_read(&memcg->numainfo_events))
+               return;
+       if (atomic_inc_return(&memcg->numainfo_updating) > 1)
+               return;
+@@ -1332,7 +1332,7 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
+                       node_clear(nid, memcg->scan_nodes);
+       }
+-      atomic_set(&memcg->numainfo_events, 0);
++      atomic64_set(&memcg->numainfo_events, 0);
+       atomic_set(&memcg->numainfo_updating, 0);
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index de88f33..f9d9816 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -64,7 +64,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
+ int sysctl_memory_failure_recovery __read_mostly = 1;
+-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
++atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
+ #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+@@ -188,7 +188,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+               pfn, t->comm, t->pid);
+       si.si_signo = SIGBUS;
+       si.si_errno = 0;
+-      si.si_addr = (void *)addr;
++      si.si_addr = (void __user *)addr;
+ #ifdef __ARCH_SI_TRAPNO
+       si.si_trapno = trapno;
+ #endif
+@@ -779,7 +779,7 @@ static struct page_state {
+       unsigned long res;
+       enum mf_action_page_type type;
+       int (*action)(struct page *p, unsigned long pfn);
+-} error_states[] = {
++} __do_const error_states[] = {
+       { reserved,     reserved,       MF_MSG_KERNEL,  me_kernel },
+       /*
+        * free pages are specially detected outside this table:
+diff --git a/mm/memory.c b/mm/memory.c
+index 793fe0f..6e94a87 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -427,6 +427,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+               free_pte_range(tlb, pmd, addr);
+       } while (pmd++, addr = next, addr != end);
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
+       start &= PUD_MASK;
+       if (start < floor)
+               return;
+@@ -442,6 +443,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+       pud_clear(pud);
+       pmd_free_tlb(tlb, pmd, start);
+       mm_dec_nr_pmds(tlb->mm);
++#endif
+ }
+ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -461,6 +463,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+               free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+       } while (pud++, addr = next, addr != end);
++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
+       start &= PGDIR_MASK;
+       if (start < floor)
+               return;
+@@ -475,6 +478,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+       pud = pud_offset(pgd, start);
+       pgd_clear(pgd);
+       pud_free_tlb(tlb, pud, start);
++#endif
++
+ }
+ /*
+@@ -693,7 +698,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+       /*
+        * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+        */
+-      pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
++      pr_alert("file:%pD fault:%pX mmap:%pX readpage:%pX\n",
+                vma->vm_file,
+                vma->vm_ops ? vma->vm_ops->fault : NULL,
+                vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
+@@ -1464,6 +1469,10 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+       return NULL;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
++#endif
++
+ /*
+  * This is the old fallback for page remapping.
+  *
+@@ -1497,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+       page_add_file_rmap(page, false);
+       set_pte_at(mm, addr, pte, mk_pte(page, prot));
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_file_pte(vma, addr, page, ptl);
++#endif
++
+       retval = 0;
+       pte_unmap_unlock(pte, ptl);
+       return retval;
+@@ -1541,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+       if (!page_count(page))
+               return -EINVAL;
+       if (!(vma->vm_flags & VM_MIXEDMAP)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              struct vm_area_struct *vma_m;
++#endif
++
+               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(vma->vm_flags & VM_PFNMAP);
+               vma->vm_flags |= VM_MIXEDMAP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              vma_m = pax_find_mirror_vma(vma);
++              if (vma_m)
++                      vma_m->vm_flags |= VM_MIXEDMAP;
++#endif
++
+       }
+       return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+@@ -1650,6 +1675,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       pfn_t pfn)
+ {
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
++      BUG_ON(vma->vm_mirror);
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+@@ -1903,7 +1929,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+       BUG_ON(pud_huge(*pud));
+-      pmd = pmd_alloc(mm, pud, addr);
++      pmd = (mm == &init_mm) ?
++              pmd_alloc_kernel(mm, pud, addr) :
++              pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+@@ -1923,7 +1951,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+       unsigned long next;
+       int err;
+-      pud = pud_alloc(mm, pgd, addr);
++      pud = (mm == &init_mm) ?
++              pud_alloc_kernel(mm, pgd, addr) :
++              pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+@@ -2119,6 +2149,185 @@ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte,
+       return VM_FAULT_WRITE;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      spinlock_t *ptl;
++      pte_t *pte, entry;
++
++      pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++      entry = *pte;
++      if (pte_none(entry))
++              ;
++      else if (!pte_present(entry)) {
++              swp_entry_t swapentry;
++
++              swapentry = pte_to_swp_entry(entry);
++              if (!non_swap_entry(swapentry))
++                      dec_mm_counter_fast(mm, MM_SWAPENTS);
++              else if (is_migration_entry(swapentry))
++                      dec_mm_counter_fast(mm, mm_counter(migration_entry_to_page(swapentry)));
++              free_swap_and_cache(swapentry);
++              pte_clear_not_present_full(mm, address, pte, 0);
++      } else {
++              struct page *page;
++
++              flush_cache_page(vma, address, pte_pfn(entry));
++              entry = ptep_clear_flush(vma, address, pte);
++              BUG_ON(pte_dirty(entry));
++              page = vm_normal_page(vma, address, entry);
++              if (page) {
++                      update_hiwater_rss(mm);
++                      dec_mm_counter_fast(mm, mm_counter(page));
++                      page_remove_rmap(page, false);
++                      put_page(page);
++              }
++      }
++      pte_unmap_unlock(pte, ptl);
++}
++
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * the ptl of the lower mapped page is held on entry and is not released on exit
++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static bool pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      unsigned long address_m;
++      spinlock_t *ptl_m;
++      struct vm_area_struct *vma_m;
++      pmd_t *pmd_m;
++      pte_t *pte_m, entry_m;
++
++      BUG_ON(!page_m || !PageAnon(page_m));
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return false;
++
++      BUG_ON(!PageLocked(page_m));
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++      pte_m = pte_offset_map(pmd_m, address_m);
++      ptl_m = pte_lockptr(mm, pmd_m);
++      if (ptl != ptl_m) {
++              spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++              if (!pte_none(*pte_m))
++                      goto out;
++      }
++
++      entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++      get_page(page_m);
++      page_add_anon_rmap(page_m, vma_m, address_m, false);
++      inc_mm_counter_fast(mm, MM_ANONPAGES);
++      set_pte_at(mm, address_m, pte_m, entry_m);
++      update_mmu_cache(vma_m, address_m, pte_m);
++out:
++      if (ptl != ptl_m)
++              spin_unlock(ptl_m);
++      pte_unmap(pte_m);
++      return true;
++}
++
++static void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      unsigned long address_m;
++      spinlock_t *ptl_m;
++      struct vm_area_struct *vma_m;
++      pmd_t *pmd_m;
++      pte_t *pte_m, entry_m;
++
++      BUG_ON(!page_m || PageAnon(page_m));
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++      pte_m = pte_offset_map(pmd_m, address_m);
++      ptl_m = pte_lockptr(mm, pmd_m);
++      if (ptl != ptl_m) {
++              spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++              if (!pte_none(*pte_m))
++                      goto out;
++      }
++
++      entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++      get_page(page_m);
++      page_add_file_rmap(page_m, false);
++      inc_mm_counter_fast(mm, mm_counter_file(page_m));
++      set_pte_at(mm, address_m, pte_m, entry_m);
++      update_mmu_cache(vma_m, address_m, pte_m);
++out:
++      if (ptl != ptl_m)
++              spin_unlock(ptl_m);
++      pte_unmap(pte_m);
++}
++
++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      unsigned long address_m;
++      spinlock_t *ptl_m;
++      struct vm_area_struct *vma_m;
++      pmd_t *pmd_m;
++      pte_t *pte_m, entry_m;
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (!vma_m)
++              return;
++
++      BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++      address_m = address + SEGMEXEC_TASK_SIZE;
++      pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++      pte_m = pte_offset_map(pmd_m, address_m);
++      ptl_m = pte_lockptr(mm, pmd_m);
++      if (ptl != ptl_m) {
++              spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++              if (!pte_none(*pte_m))
++                      goto out;
++      }
++
++      entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
++      set_pte_at(mm, address_m, pte_m, entry_m);
++out:
++      if (ptl != ptl_m)
++              spin_unlock(ptl_m);
++      pte_unmap(pte_m);
++}
++
++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
++{
++      struct page *page_m;
++      pte_t entry;
++
++      if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
++              return;
++
++      entry = *pte;
++      page_m  = vm_normal_page(vma, address, entry);
++      if (!page_m)
++              pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
++      else if (PageAnon(page_m)) {
++              if (pax_find_mirror_vma(vma)) {
++                      pte_unmap_unlock(pte, ptl);
++                      lock_page(page_m);
++                      pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
++                      if (pte_same(entry, *pte))
++                              pax_mirror_anon_pte(vma, address, page_m, ptl);
++                      unlock_page(page_m);
++              }
++      } else
++              pax_mirror_file_pte(vma, address, page_m, ptl);
++}
++#endif
++
+ /*
+  * Handle the case of a page which we actually need to copy to a new page.
+  *
+@@ -2174,6 +2383,12 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
+        */
+       fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl);
+       if (likely(pte_same(*fe->pte, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (pax_find_mirror_vma(vma))
++                      BUG_ON(!trylock_page(new_page));
++#endif
++
+               if (old_page) {
+                       if (!PageAnon(old_page)) {
+                               dec_mm_counter_fast(mm,
+@@ -2229,6 +2444,11 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
+                       page_remove_rmap(old_page, false);
+               }
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (pax_mirror_anon_pte(vma, fe->address, new_page, fe->ptl))
++                      unlock_page(new_page);
++#endif
++
+               /* Free the old page.. */
+               new_page = old_page;
+               page_copied = 1;
+@@ -2653,6 +2873,11 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
+       if (mem_cgroup_swap_full(page) ||
+           (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+               try_to_free_swap(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((fe->flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
++#endif
++
+       unlock_page(page);
+       if (page != swapcache) {
+               /*
+@@ -2676,6 +2901,12 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, fe->address, fe->pte);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_mirror_anon_pte(vma, fe->address, page, fe->ptl))
++              unlock_page(page);
++#endif
++
+ unlock:
+       pte_unmap_unlock(fe->pte, fe->ptl);
+ out:
+@@ -2695,40 +2926,6 @@ out_release:
+ }
+ /*
+- * This is like a special single-page "expand_{down|up}wards()",
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
+- * doesn't hit another vma.
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+-      address &= PAGE_MASK;
+-      if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+-              struct vm_area_struct *prev = vma->vm_prev;
+-
+-              /*
+-               * Is there a mapping abutting this one below?
+-               *
+-               * That's only ok if it's the same stack mapping
+-               * that has gotten split..
+-               */
+-              if (prev && prev->vm_end == address)
+-                      return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+-              return expand_downwards(vma, address - PAGE_SIZE);
+-      }
+-      if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+-              struct vm_area_struct *next = vma->vm_next;
+-
+-              /* As VM_GROWSDOWN but s/below/above/ */
+-              if (next && next->vm_start == address + PAGE_SIZE)
+-                      return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+-
+-              return expand_upwards(vma, address + PAGE_SIZE);
+-      }
+-      return 0;
+-}
+-
+-/*
+  * We enter with non-exclusive mmap_sem (to exclude vma changes,
+  * but allow concurrent faults), and pte mapped but not yet locked.
+  * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2737,17 +2934,13 @@ static int do_anonymous_page(struct fault_env *fe)
+ {
+       struct vm_area_struct *vma = fe->vma;
+       struct mem_cgroup *memcg;
+-      struct page *page;
++      struct page *page = NULL;
+       pte_t entry;
+       /* File mapping without ->vm_ops ? */
+       if (vma->vm_flags & VM_SHARED)
+               return VM_FAULT_SIGBUS;
+-      /* Check if we need to add a guard page to the stack */
+-      if (check_stack_guard_page(vma, fe->address) < 0)
+-              return VM_FAULT_SIGSEGV;
+-
+       /*
+        * Use pte_alloc() instead of pte_alloc_map().  We can't run
+        * pte_offset_map() on pmds where a huge pmd might be created
+@@ -2816,6 +3009,11 @@ static int do_anonymous_page(struct fault_env *fe)
+               return handle_userfault(fe, VM_UFFD_MISSING);
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_find_mirror_vma(vma))
++              BUG_ON(!trylock_page(page));
++#endif
++
+       inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+       page_add_new_anon_rmap(page, vma, fe->address, false);
+       mem_cgroup_commit_charge(page, memcg, false, false);
+@@ -2825,6 +3023,12 @@ setpte:
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, fe->address, fe->pte);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (page && pax_mirror_anon_pte(vma, fe->address, page, fe->ptl))
++              unlock_page(page);
++#endif
++
+ unlock:
+       pte_unmap_unlock(fe->pte, fe->ptl);
+       return 0;
+@@ -3039,6 +3243,13 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+       }
+       set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (write && !(vma->vm_flags & VM_SHARED))
++              pax_mirror_anon_pte(vma, fe->address, page, fe->ptl);
++      else
++              pax_mirror_file_pte(vma, fe->address, page, fe->ptl);
++#endif
++
+       /* no need to invalidate: a not-present page won't be cached */
+       update_mmu_cache(vma, fe->address, fe->pte);
+@@ -3552,6 +3763,11 @@ static int handle_pte_fault(struct fault_env *fe)
+               if (fe->flags & FAULT_FLAG_WRITE)
+                       flush_tlb_fix_spurious_fault(fe->vma, fe->address);
+       }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      pax_mirror_pte(fe->vma, fe->address, fe->pte, fe->pmd, fe->ptl);
++#endif
++
+ unlock:
+       pte_unmap_unlock(fe->pte, fe->ptl);
+       return 0;
+@@ -3575,14 +3791,49 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+       pgd_t *pgd;
+       pud_t *pud;
+-      pgd = pgd_offset(mm, address);
+-      pud = pud_alloc(mm, pgd, address);
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++
++      vma_m = pax_find_mirror_vma(vma);
++      if (vma_m) {
++              unsigned long address_m;
++              pgd_t *pgd_m;
++              pud_t *pud_m;
++              pmd_t *pmd_m;
++              pmd_t orig_pmd_m;
++
++              if (vma->vm_start > vma_m->vm_start) {
++                      address_m = address;
++                      fe.address -= SEGMEXEC_TASK_SIZE;
++                      fe.vma = vma_m;
++              } else
++                      address_m = address + SEGMEXEC_TASK_SIZE;
++
++              pgd_m = pgd_offset(mm, address_m);
++              pud_m = pud_alloc(mm, pgd_m, address_m);
++              if (!pud_m)
++                      return VM_FAULT_OOM;
++              pmd_m = pmd_alloc(mm, pud_m, address_m);
++              if (!pmd_m)
++                      return VM_FAULT_OOM;
++              BUG_ON(transparent_hugepage_enabled(vma_m));
++              orig_pmd_m = *pmd_m;
++              barrier();
++              BUG_ON(pmd_trans_huge(orig_pmd_m) || pmd_devmap(orig_pmd_m));
++              if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
++                      return VM_FAULT_OOM;
++              pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
++      }
++#endif
++
++      pgd = pgd_offset(mm, fe.address);
++      pud = pud_alloc(mm, pgd, fe.address);
+       if (!pud)
+               return VM_FAULT_OOM;
+-      fe.pmd = pmd_alloc(mm, pud, address);
++      fe.pmd = pmd_alloc(mm, pud, fe.address);
+       if (!fe.pmd)
+               return VM_FAULT_OOM;
+-      if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma)) {
++      if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(fe.vma)) {
+               int ret = create_huge_pmd(&fe);
+               if (!(ret & VM_FAULT_FALLBACK))
+                       return ret;
+@@ -3592,7 +3843,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+               barrier();
+               if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
+-                      if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
++                      if (pmd_protnone(orig_pmd) && vma_is_accessible(fe.vma))
+                               return do_huge_pmd_numa_page(&fe, orig_pmd);
+                       if ((fe.flags & FAULT_FLAG_WRITE) &&
+@@ -3667,7 +3918,7 @@ EXPORT_SYMBOL_GPL(handle_mm_fault);
+  * Allocate page upper directory.
+  * We've already handled the fast-path in-line.
+  */
+-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++static int ____pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address, bool kernel)
+ {
+       pud_t *new = pud_alloc_one(mm, address);
+       if (!new)
+@@ -3678,11 +3929,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+       spin_lock(&mm->page_table_lock);
+       if (pgd_present(*pgd))          /* Another has populated it */
+               pud_free(mm, new);
++      else if (kernel)
++              pgd_populate_kernel(mm, pgd, new);
+       else
+               pgd_populate(mm, pgd, new);
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+ }
++
++int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++      return ____pud_alloc(mm, pgd, address, false);
++}
++
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++      return ____pud_alloc(mm, pgd, address, true);
++}
+ #endif /* __PAGETABLE_PUD_FOLDED */
+ #ifndef __PAGETABLE_PMD_FOLDED
+@@ -3690,7 +3953,7 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+  * Allocate page middle directory.
+  * We've already handled the fast-path in-line.
+  */
+-int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
++static int ____pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address, bool kernel)
+ {
+       pmd_t *new = pmd_alloc_one(mm, address);
+       if (!new)
+@@ -3702,19 +3965,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ #ifndef __ARCH_HAS_4LEVEL_HACK
+       if (!pud_present(*pud)) {
+               mm_inc_nr_pmds(mm);
+-              pud_populate(mm, pud, new);
++              if (kernel)
++                      pud_populate_kernel(mm, pud, new);
++              else
++                      pud_populate(mm, pud, new);
+       } else  /* Another has populated it */
+               pmd_free(mm, new);
+ #else
+       if (!pgd_present(*pud)) {
+               mm_inc_nr_pmds(mm);
+-              pgd_populate(mm, pud, new);
++              if (kernel)
++                      pgd_populate_kernel(mm, pud, new);
++              else
++                      pgd_populate(mm, pud, new);
+       } else /* Another has populated it */
+               pmd_free(mm, new);
+ #endif /* __ARCH_HAS_4LEVEL_HACK */
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+ }
++
++int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++      return ____pmd_alloc(mm, pud, address, false);
++}
++
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++      return ____pmd_alloc(mm, pud, address, true);
++}
+ #endif /* __PAGETABLE_PMD_FOLDED */
+ static int __follow_pte(struct mm_struct *mm, unsigned long address,
+@@ -3824,8 +4103,8 @@ out:
+       return ret;
+ }
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+-                      void *buf, int len, int write)
++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++                      void *buf, size_t len, int write)
+ {
+       resource_size_t phys_addr;
+       unsigned long prot = 0;
+@@ -3851,8 +4130,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+  * Access another process' address space as given in mm.  If non-NULL, use the
+  * given task for page fault accounting.
+  */
+-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+-              unsigned long addr, void *buf, int len, int write)
++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
++              unsigned long addr, void *buf, size_t len, int write)
+ {
+       struct vm_area_struct *vma;
+       void *old_buf = buf;
+@@ -3860,7 +4139,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+       down_read(&mm->mmap_sem);
+       /* ignore errors, just check how much was successfully transferred */
+       while (len) {
+-              int bytes, ret, offset;
++              ssize_t bytes, ret, offset;
+               void *maddr;
+               struct page *page = NULL;
+@@ -3921,8 +4200,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+  *
+  * The caller must hold a reference on @mm.
+  */
+-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+-              void *buf, int len, int write)
++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++              void *buf, size_t len, int write)
+ {
+       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ }
+@@ -3932,11 +4211,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+  * Source/target buffer must be kernel space,
+  * Do not walk the page table directly, use get_user_pages
+  */
+-int access_process_vm(struct task_struct *tsk, unsigned long addr,
+-              void *buf, int len, int write)
++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
++              void *buf, size_t len, int write)
+ {
+       struct mm_struct *mm;
+-      int ret;
++      ssize_t ret;
+       mm = get_task_mm(tsk);
+       if (!mm)
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 2da72a5..845e125 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -732,6 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+       unsigned long vmstart;
+       unsigned long vmend;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m;
++#endif
++
+       vma = find_vma(mm, start);
+       if (!vma || vma->vm_start > start)
+               return -EFAULT;
+@@ -775,6 +779,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+               err = vma_replace_policy(vma, new_pol);
+               if (err)
+                       goto out;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              vma_m = pax_find_mirror_vma(vma);
++              if (vma_m) {
++                      err = vma_replace_policy(vma_m, new_pol);
++                      if (err)
++                              goto out;
++              }
++#endif
++
+       }
+  out:
+@@ -1190,6 +1204,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+       if (end < start)
+               return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (end > SEGMEXEC_TASK_SIZE)
++                      return -EINVAL;
++      } else
++#endif
++
++      if (end > TASK_SIZE)
++              return -EINVAL;
++
+       if (end == start)
+               return 0;
+@@ -1415,8 +1440,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+        */
+       tcred = __task_cred(task);
+       if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+-          !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
+-          !capable(CAP_SYS_NICE)) {
++          !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
+               rcu_read_unlock();
+               err = -EPERM;
+               goto out_put;
+@@ -1447,6 +1471,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+               goto out;
+       }
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      if (mm != current->mm &&
++          (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++              mmput(mm);
++              err = -EPERM;
++              goto out;
++      }
++#endif
++
+       err = do_migrate_pages(mm, old, new,
+               capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
+diff --git a/mm/migrate.c b/mm/migrate.c
+index f7ee04a..41da9dc 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1686,8 +1686,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+        */
+       tcred = __task_cred(task);
+       if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+-          !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
+-          !capable(CAP_SYS_NICE)) {
++          !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
+               rcu_read_unlock();
+               err = -EPERM;
+               goto out;
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 14645be..e2c7aa1 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -14,6 +14,7 @@
+ #include <linux/pagevec.h>
+ #include <linux/mempolicy.h>
+ #include <linux/syscalls.h>
++#include <linux/security.h>
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/rmap.h>
+@@ -573,7 +574,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+ {
+       unsigned long nstart, end, tmp;
+       struct vm_area_struct * vma, * prev;
+-      int error;
++      int error = 0;
+       VM_BUG_ON(offset_in_page(start));
+       VM_BUG_ON(len != PAGE_ALIGN(len));
+@@ -582,6 +583,9 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+               return -EINVAL;
+       if (end == start)
+               return 0;
++      if (end > TASK_SIZE)
++              return -EINVAL;
++
+       vma = find_vma(current->mm, start);
+       if (!vma || vma->vm_start > start)
+               return -ENOMEM;
+@@ -591,8 +595,14 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+               prev = vma;
+       for (nstart = start ; ; ) {
+-              vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
++              vm_flags_t newflags;
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++                      break;
++#endif
++
++              newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
+               newflags |= flags;
+               /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
+@@ -641,6 +651,10 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
+       locked += current->mm->locked_vm;
+       /* check against resource limits */
++      if (locked > (ULONG_MAX >> PAGE_SHIFT))
++              gr_learn_resource(current, RLIMIT_MEMLOCK, ULONG_MAX, 1);
++      else
++              gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+       if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
+               error = apply_vma_lock_flags(start, len, flags);
+@@ -722,6 +736,11 @@ static int apply_mlockall_flags(int flags)
+       for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+               vm_flags_t newflags;
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++                      break;
++#endif
++
+               newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
+               newflags |= to_add;
+@@ -754,6 +773,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+               return -EINTR;
+       ret = -ENOMEM;
++      if (current->mm->total_vm > (ULONG_MAX >> PAGE_SHIFT))
++              gr_learn_resource(current, RLIMIT_MEMLOCK, ULONG_MAX, 1);
++      else
++              gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
+       if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
+           capable(CAP_IPC_LOCK))
+               ret = apply_mlockall_flags(flags);
+diff --git a/mm/mm_init.c b/mm/mm_init.c
+index 5b72266..dc04ce5 100644
+--- a/mm/mm_init.c
++++ b/mm/mm_init.c
+@@ -169,7 +169,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
+       return NOTIFY_OK;
+ }
+-static struct notifier_block compute_batch_nb __meminitdata = {
++static struct notifier_block compute_batch_nb = {
+       .notifier_call = mm_compute_batch_notifier,
+       .priority = IPC_CALLBACK_PRI, /* use lowest priority */
+ };
+diff --git a/mm/mmap.c b/mm/mmap.c
+index ca9d91b..b2438f1 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -44,6 +44,7 @@
+ #include <linux/userfaultfd_k.h>
+ #include <linux/moduleparam.h>
+ #include <linux/pkeys.h>
++#include <linux/random.h>
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -70,6 +71,16 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
+ static bool ignore_rlimit_data;
+ core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
++static inline void verify_mm_writelocked(struct mm_struct *mm)
++{
++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
++      if (unlikely(down_read_trylock(&mm->mmap_sem))) {
++              up_read(&mm->mmap_sem);
++              BUG();
++      }
++#endif
++}
++
+ static void unmap_region(struct mm_struct *mm,
+               struct vm_area_struct *vma, struct vm_area_struct *prev,
+               unsigned long start, unsigned long end);
+@@ -89,16 +100,25 @@ static void unmap_region(struct mm_struct *mm,
+  *            x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
+  *
+  */
+-pgprot_t protection_map[16] = {
++pgprot_t protection_map[16] __read_only = {
+       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+       __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+-pgprot_t vm_get_page_prot(unsigned long vm_flags)
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+ {
+-      return __pgprot(pgprot_val(protection_map[vm_flags &
++      pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+                       pgprot_val(arch_vm_get_page_prot(vm_flags)));
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++      if (!(__supported_pte_mask & _PAGE_NX) &&
++          (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
++          (vm_flags & (VM_READ | VM_WRITE)))
++              prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
++#endif
++
++      return prot;
+ }
+ EXPORT_SYMBOL(vm_get_page_prot);
+@@ -160,6 +180,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+       struct vm_area_struct *next = vma->vm_next;
+       might_sleep();
++      BUG_ON(vma->vm_mirror);
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
+       if (vma->vm_file)
+@@ -173,6 +194,7 @@ static int do_brk(unsigned long addr, unsigned long len);
+ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ {
++      unsigned long rlim;
+       unsigned long retval;
+       unsigned long newbrk, oldbrk;
+       struct mm_struct *mm = current->mm;
+@@ -204,7 +226,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+        * segment grow beyond its set limit the in case where the limit is
+        * not page aligned -Ram Gupta
+        */
+-      if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
++      rlim = rlimit(RLIMIT_DATA);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++      /* force a minimum 16MB brk heap on setuid/setgid binaries */
++      if (rlim < (4096 * PAGE_SIZE) && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
++              rlim = 4096 * PAGE_SIZE;
++#endif
++      if (check_data_rlimit(rlim, brk, mm->start_brk,
+                             mm->end_data, mm->start_data))
+               goto out;
+@@ -879,6 +907,12 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+                    pgoff_t vm_pgoff,
+                    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
++              return 0;
++#endif
++
+       if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+               if (vma->vm_pgoff == vm_pgoff)
+@@ -900,6 +934,12 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+                   pgoff_t vm_pgoff,
+                   struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
++              return 0;
++#endif
++
+       if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+               pgoff_t vm_pglen;
+@@ -950,6 +990,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+       struct vm_area_struct *area, *next;
+       int err;
++#ifdef CONFIG_PAX_SEGMEXEC
++      unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
++      struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
++
++      BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
++#endif
++
+       /*
+        * We later require that vma->vm_flags == vm_flags,
+        * so this tests vma->vm_flags & VM_SPECIAL, too.
+@@ -965,6 +1012,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+       if (next && next->vm_end == end)                /* cases 6, 7, 8 */
+               next = next->vm_next;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (prev)
++              prev_m = pax_find_mirror_vma(prev);
++      if (area)
++              area_m = pax_find_mirror_vma(area);
++      if (next)
++              next_m = pax_find_mirror_vma(next);
++#endif
++
+       /*
+        * Can it merge with the predecessor?
+        */
+@@ -987,9 +1043,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+                                                       /* cases 1, 6 */
+                       err = vma_adjust(prev, prev->vm_start,
+                               next->vm_end, prev->vm_pgoff, NULL);
+-              } else                                  /* cases 2, 5, 7 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && prev_m)
++                              err = vma_adjust(prev_m, prev_m->vm_start,
++                                      next_m->vm_end, prev_m->vm_pgoff, NULL);
++#endif
++
++              } else {                                /* cases 2, 5, 7 */
+                       err = vma_adjust(prev, prev->vm_start,
+                               end, prev->vm_pgoff, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && prev_m)
++                              err = vma_adjust(prev_m, prev_m->vm_start,
++                                              end_m, prev_m->vm_pgoff, NULL);
++#endif
++
++              }
+               if (err)
+                       return NULL;
+               khugepaged_enter_vma_merge(prev, vm_flags);
+@@ -1004,12 +1075,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+                       can_vma_merge_before(next, vm_flags,
+                                            anon_vma, file, pgoff+pglen,
+                                            vm_userfaultfd_ctx)) {
+-              if (prev && addr < prev->vm_end)        /* case 4 */
++              if (prev && addr < prev->vm_end) {      /* case 4 */
+                       err = vma_adjust(prev, prev->vm_start,
+                               addr, prev->vm_pgoff, NULL);
+-              else                                    /* cases 3, 8 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && prev_m)
++                              err = vma_adjust(prev_m, prev_m->vm_start,
++                                              addr_m, prev_m->vm_pgoff, NULL);
++#endif
++
++              } else {                                /* cases 3, 8 */
+                       err = vma_adjust(area, addr, next->vm_end,
+                               next->vm_pgoff - pglen, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      if (!err && area_m)
++                              err = vma_adjust(area_m, addr_m, next_m->vm_end,
++                                              next_m->vm_pgoff - pglen, NULL);
++#endif
++
++              }
+               if (err)
+                       return NULL;
+               khugepaged_enter_vma_merge(area, vm_flags);
+@@ -1139,6 +1225,10 @@ static inline int mlock_future_check(struct mm_struct *mm,
+               locked += mm->locked_vm;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
+               lock_limit >>= PAGE_SHIFT;
++              if (locked > (ULONG_MAX >> PAGE_SHIFT))
++                      gr_learn_resource(current, RLIMIT_MEMLOCK, ULONG_MAX, 1);
++              else
++                      gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+                       return -EAGAIN;
+       }
+@@ -1167,7 +1257,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+        * (the exception is when the underlying filesystem is noexec
+        *  mounted, in which case we dont add PROT_EXEC.)
+        */
+-      if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++      if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+               if (!(file && path_noexec(&file->f_path)))
+                       prot |= PROT_EXEC;
+@@ -1190,7 +1280,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+       /* Obtain the address to map to. we verify (or select) it and ensure
+        * that it represents a valid section of the address space.
+        */
+-      addr = get_unmapped_area(file, addr, len, pgoff, flags);
++      addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+       if (offset_in_page(addr))
+               return addr;
+@@ -1207,6 +1297,43 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+       vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+                       mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
++#ifdef CONFIG_PAX_MPROTECT
++      if (mm->pax_flags & MF_PAX_MPROTECT) {
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++              if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
++                  mm->binfmt->handle_mmap)
++                      mm->binfmt->handle_mmap(file);
++#endif
++
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
++                      gr_log_rwxmmap(file);
++
++#ifdef CONFIG_PAX_EMUPLT
++                      vm_flags &= ~VM_EXEC;
++#else
++                      return -EPERM;
++#endif
++
++              }
++
++              if (!(vm_flags & VM_EXEC))
++                      vm_flags &= ~VM_MAYEXEC;
++#else
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++                      vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++              else
++                      vm_flags &= ~VM_MAYWRITE;
++      }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++      if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
++              vm_flags &= ~VM_PAGEEXEC;
++#endif
++
+       if (flags & MAP_LOCKED)
+               if (!can_do_mlock())
+                       return -EPERM;
+@@ -1294,6 +1421,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+                       vm_flags |= VM_NORESERVE;
+       }
++      if (!gr_acl_handle_mmap(file, prot))
++              return -EACCES;
++      
+       addr = mmap_region(file, addr, len, vm_flags, pgoff);
+       if (!IS_ERR_VALUE(addr) &&
+           ((vm_flags & VM_LOCKED) ||
+@@ -1387,7 +1517,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+       const struct vm_operations_struct *vm_ops = vma->vm_ops;
+       /* If it was private or non-writable, the write bit is already clear */
+-      if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
++      if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
+               return 0;
+       /* The backer wishes to know when pages are first written to? */
+@@ -1438,7 +1568,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+       struct rb_node **rb_link, *rb_parent;
+       unsigned long charged = 0;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m = NULL;
++#endif
++
++      /*
++       * mm->mmap_sem is required to protect against another thread
++       * changing the mappings in case we sleep.
++       */
++      verify_mm_writelocked(mm);
++
+       /* Check against address space limit. */
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
+       if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
+               unsigned long nr_pages;
+@@ -1458,6 +1603,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+                             &rb_parent)) {
+               if (do_munmap(mm, addr, len))
+                       return -ENOMEM;
++              BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
+       }
+       /*
+@@ -1489,6 +1635,16 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+               goto unacct_error;
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
++              vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++              if (!vma_m) {
++                      error = -ENOMEM;
++                      goto free_vma;
++              }
++      }
++#endif
++
+       vma->vm_mm = mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
+@@ -1519,6 +1675,13 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+               if (error)
+                       goto unmap_and_free_vma;
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++              if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
++                      vma->vm_flags |= VM_PAGEEXEC;
++                      vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++              }
++#endif
++
+               /* Can addr have changed??
+                *
+                * Answer: Yes, several device drivers can do it in their
+@@ -1537,6 +1700,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+       }
+       vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m)
++              BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+       /* Once vma denies write, undo our temporary denial count */
+       if (file) {
+               if (vm_flags & VM_SHARED)
+@@ -1549,6 +1718,7 @@ out:
+       perf_event_mmap(vma);
+       vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
++      track_exec_limit(mm, addr, addr + len, vm_flags);
+       if (vm_flags & VM_LOCKED) {
+               if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
+                                       vma == get_gate_vma(current->mm)))
+@@ -1586,6 +1756,12 @@ allow_write_and_free_vma:
+       if (vm_flags & VM_DENYWRITE)
+               allow_write_access(file);
+ free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m)
++              kmem_cache_free(vm_area_cachep, vma_m);
++#endif
++
+       kmem_cache_free(vm_area_cachep, vma);
+ unacct_error:
+       if (charged)
+@@ -1593,7 +1769,54 @@ unacct_error:
+       return error;
+ }
+-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++      if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
++              return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
++
++      return 0;
++}
++#endif
++
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
++{
++      if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++              if (addr > sysctl_heap_stack_gap)
++                      vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++              else
++                      vma = find_vma(current->mm, 0);
++              if (vma && (vma->vm_flags & VM_GROWSUP))
++                      return false;
++#endif
++              return true;
++      }
++
++      if (addr + len > vma->vm_start)
++              return false;
++
++      if (vma->vm_flags & VM_GROWSDOWN)
++              return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++      else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++              return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
++#endif
++      else if (offset)
++              return offset <= vma->vm_start - addr - len;
++
++      return true;
++}
++
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long flag, unsigned long gap_start, unsigned long gap_end)
++{
++      if (!vma || !(vma->vm_flags & flag))
++              return 0;
++
++      return min(sysctl_heap_stack_gap, gap_end - gap_start);
++}
++
++unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
+ {
+       /*
+        * We implement the search by looking for an rbtree node that
+@@ -1646,6 +1869,15 @@ check_current:
+               /* Check if current node has a suitable gap */
+               if (gap_start > high_limit)
+                       return -ENOMEM;
++
++              gap_start += skip_heap_stack_gap(vma->vm_prev, VM_GROWSUP, gap_start, gap_end);
++              gap_end -= skip_heap_stack_gap(vma, VM_GROWSDOWN, gap_start, gap_end);
++
++              if (gap_end - gap_start > info->threadstack_offset)
++                      gap_start += info->threadstack_offset;
++              else
++                      gap_start = gap_end;
++
+               if (gap_end >= low_limit && gap_end - gap_start >= length)
+                       goto found;
+@@ -1695,7 +1927,7 @@ found:
+       return gap_start;
+ }
+-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
++unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
+ {
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+@@ -1749,6 +1981,15 @@ check_current:
+               gap_end = vma->vm_start;
+               if (gap_end < low_limit)
+                       return -ENOMEM;
++
++              gap_start += skip_heap_stack_gap(vma->vm_prev, VM_GROWSUP, gap_start, gap_end);
++              gap_end -= skip_heap_stack_gap(vma, VM_GROWSDOWN, gap_start, gap_end);
++
++              if (gap_end - gap_start > info->threadstack_offset)
++                      gap_end -= info->threadstack_offset;
++              else
++                      gap_end = gap_start;
++
+               if (gap_start <= high_limit && gap_end - gap_start >= length)
+                       goto found;
+@@ -1812,6 +2053,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       if (len > TASK_SIZE - mmap_min_addr)
+               return -ENOMEM;
+@@ -1819,11 +2061,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       if (flags & MAP_FIXED)
+               return addr;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+-                  (!vma || addr + len <= vma->vm_start))
++                  check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -1832,6 +2078,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = 0;
++      info.threadstack_offset = offset;
+       return vm_unmapped_area(&info);
+ }
+ #endif
+@@ -1850,6 +2097,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       struct vm_unmapped_area_info info;
++      unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE - mmap_min_addr)
+@@ -1858,12 +2106,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       if (flags & MAP_FIXED)
+               return addr;
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+       /* requesting a specific address */
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+-                              (!vma || addr + len <= vma->vm_start))
++                              check_heap_stack_gap(vma, addr, len, offset))
+                       return addr;
+       }
+@@ -1872,6 +2124,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.high_limit = mm->mmap_base;
+       info.align_mask = 0;
++      info.threadstack_offset = offset;
+       addr = vm_unmapped_area(&info);
+       /*
+@@ -1884,6 +2137,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++              if (mm->pax_flags & MF_PAX_RANDMMAP)
++                      info.low_limit += mm->delta_mmap;
++#endif
++
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
+@@ -1993,6 +2252,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+       return vma;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++      struct vm_area_struct *vma_m;
++
++      BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++      if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++              BUG_ON(vma->vm_mirror);
++              return NULL;
++      }
++      BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
++      vma_m = vma->vm_mirror;
++      BUG_ON(!vma_m || vma_m->vm_mirror != vma);
++      BUG_ON(vma->vm_file != vma_m->vm_file);
++      BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
++      BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
++      BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
++      BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
++      return vma_m;
++}
++#endif
++
+ /*
+  * Verify that the stack growth is acceptable and
+  * update accounting. This is shared with both the
+@@ -2010,8 +2291,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+       /* Stack limit test */
+       actual_size = size;
+-      if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+-              actual_size -= PAGE_SIZE;
++      gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
+       if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+               return -ENOMEM;
+@@ -2022,6 +2302,10 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+               locked = mm->locked_vm + grow;
+               limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+               limit >>= PAGE_SHIFT;
++              if (locked > (ULONG_MAX >> PAGE_SHIFT))
++                      gr_learn_resource(current, RLIMIT_MEMLOCK, ULONG_MAX, 1);
++              else
++                      gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+               if (locked > limit && !capable(CAP_IPC_LOCK))
+                       return -ENOMEM;
+       }
+@@ -2047,17 +2331,21 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+  * vma is the last one with address > vma->vm_end.  Have to extend vma.
+  */
++#ifndef CONFIG_IA64
++static
++#endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+       struct mm_struct *mm = vma->vm_mm;
+       int error = 0;
++      bool locknext;
+       if (!(vma->vm_flags & VM_GROWSUP))
+               return -EFAULT;
+       /* Guard against wrapping around to address 0. */
+-      if (address < PAGE_ALIGN(address+4))
+-              address = PAGE_ALIGN(address+4);
++      if (address < PAGE_ALIGN(address+1))
++              address = PAGE_ALIGN(address+1);
+       else
+               return -ENOMEM;
+@@ -2065,15 +2353,24 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
++      locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
++      if (locknext && anon_vma_prepare(vma->vm_next))
++              return -ENOMEM;
++
+       /*
+        * vma->vm_start/vm_end cannot change under us because the caller
+        * is required to hold the mmap_sem in read mode.  We need the
+-       * anon_vma lock to serialize against concurrent expand_stacks.
++       * anon_vma locks to serialize against concurrent expand_stacks
++       * and expand_upwards.
+        */
+       anon_vma_lock_write(vma->anon_vma);
++      if (locknext)
++              anon_vma_lock_write(vma->vma_next->anon_vma);
+       /* Somebody else might have raced and expanded it already */
+-      if (address > vma->vm_end) {
++      if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++              error = -ENOMEM;
++      else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
+               unsigned long size, grow;
+               size = address - vma->vm_start;
+@@ -2111,6 +2408,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+                       }
+               }
+       }
++      if (locknext)
++              anon_vma_unlock_write(vma->vm_next->anon_vma);
+       anon_vma_unlock_write(vma->anon_vma);
+       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       validate_mm(mm);
+@@ -2126,6 +2425,8 @@ int expand_downwards(struct vm_area_struct *vma,
+ {
+       struct mm_struct *mm = vma->vm_mm;
+       int error;
++      bool lockprev = false;
++      struct vm_area_struct *prev;
+       address &= PAGE_MASK;
+       error = security_mmap_addr(address);
+@@ -2136,6 +2437,15 @@ int expand_downwards(struct vm_area_struct *vma,
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
++      prev = vma->vm_prev;
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
++      lockprev = prev && (prev->vm_flags & VM_GROWSUP);
++#endif
++      if (lockprev && anon_vma_prepare(prev))
++              return -ENOMEM;
++      if (lockprev)
++              anon_vma_lock_write(prev->anon_vma);
++
+       /*
+        * vma->vm_start/vm_end cannot change under us because the caller
+        * is required to hold the mmap_sem in read mode.  We need the
+@@ -2144,9 +2454,17 @@ int expand_downwards(struct vm_area_struct *vma,
+       anon_vma_lock_write(vma->anon_vma);
+       /* Somebody else might have raced and expanded it already */
+-      if (address < vma->vm_start) {
++      if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++              error = -ENOMEM;
++      else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
+               unsigned long size, grow;
++#ifdef CONFIG_PAX_SEGMEXEC
++              struct vm_area_struct *vma_m;
++
++              vma_m = pax_find_mirror_vma(vma);
++#endif
++
+               size = vma->vm_end - address;
+               grow = (vma->vm_start - address) >> PAGE_SHIFT;
+@@ -2174,13 +2492,27 @@ int expand_downwards(struct vm_area_struct *vma,
+                               vma->vm_pgoff -= grow;
+                               anon_vma_interval_tree_post_update_vma(vma);
+                               vma_gap_update(vma);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                              if (vma_m) {
++                                      anon_vma_interval_tree_pre_update_vma(vma_m);
++                                      vma_m->vm_start -= grow << PAGE_SHIFT;
++                                      vma_m->vm_pgoff -= grow;
++                                      anon_vma_interval_tree_post_update_vma(vma_m);
++                                      vma_gap_update(vma_m);
++                              }
++#endif
++
+                               spin_unlock(&mm->page_table_lock);
++                              track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
+                               perf_event_mmap(vma);
+                       }
+               }
+       }
+       anon_vma_unlock_write(vma->anon_vma);
++      if (lockprev)
++              anon_vma_unlock_write(prev->anon_vma);
+       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       validate_mm(mm);
+       return error;
+@@ -2280,6 +2612,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+       do {
+               long nrpages = vma_pages(vma);
++#ifdef CONFIG_PAX_SEGMEXEC
++              if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
++                      vma = remove_vma(vma);
++                      continue;
++              }
++#endif
++
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += nrpages;
+               vm_stat_account(mm, vma->vm_flags, -nrpages);
+@@ -2324,6 +2663,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+       insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
+       do {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (vma->vm_mirror) {
++                      BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
++                      vma->vm_mirror->vm_mirror = NULL;
++                      vma->vm_mirror->vm_flags &= ~VM_EXEC;
++                      vma->vm_mirror = NULL;
++              }
++#endif
++
+               vma_rb_erase(vma, &mm->mm_rb);
+               mm->map_count--;
+               tail_vma = vma;
+@@ -2351,14 +2700,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+       struct vm_area_struct *new;
+       int err;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m, *new_m = NULL;
++      unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
++#endif
++
+       if (is_vm_hugetlb_page(vma) && (addr &
+                                       ~(huge_page_mask(hstate_vma(vma)))))
+               return -EINVAL;
++#ifdef CONFIG_PAX_SEGMEXEC
++      vma_m = pax_find_mirror_vma(vma);
++#endif
++
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       if (!new)
+               return -ENOMEM;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m) {
++              new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++              if (!new_m) {
++                      kmem_cache_free(vm_area_cachep, new);
++                      return -ENOMEM;
++              }
++      }
++#endif
++
+       /* most fields are the same, copy all, and then fixup */
+       *new = *vma;
+@@ -2371,6 +2739,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+               new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m) {
++              *new_m = *vma_m;
++              INIT_LIST_HEAD(&new_m->anon_vma_chain);
++              new_m->vm_mirror = new;
++              new->vm_mirror = new_m;
++
++              if (new_below)
++                      new_m->vm_end = addr_m;
++              else {
++                      new_m->vm_start = addr_m;
++                      new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
++              }
++      }
++#endif
++
+       err = vma_dup_policy(vma, new);
+       if (err)
+               goto out_free_vma;
+@@ -2391,6 +2775,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+       else
+               err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (!err && vma_m) {
++              struct mempolicy *pol = vma_policy(new);
++
++              if (anon_vma_clone(new_m, vma_m))
++                      goto out_free_mpol;
++
++              mpol_get(pol);
++              set_vma_policy(new_m, pol);
++
++              if (new_m->vm_file)
++                      get_file(new_m->vm_file);
++
++              if (new_m->vm_ops && new_m->vm_ops->open)
++                      new_m->vm_ops->open(new_m);
++
++              if (new_below)
++                      err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
++                              ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
++              else
++                      err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
++
++              if (err) {
++                      if (new_m->vm_ops && new_m->vm_ops->close)
++                              new_m->vm_ops->close(new_m);
++                      if (new_m->vm_file)
++                              fput(new_m->vm_file);
++                      mpol_put(pol);
++              }
++      }
++#endif
++
+       /* Success. */
+       if (!err)
+               return 0;
+@@ -2400,10 +2816,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+               new->vm_ops->close(new);
+       if (new->vm_file)
+               fput(new->vm_file);
+-      unlink_anon_vmas(new);
+  out_free_mpol:
+       mpol_put(vma_policy(new));
+  out_free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (new_m) {
++              unlink_anon_vmas(new_m);
++              kmem_cache_free(vm_area_cachep, new_m);
++      }
++#endif
++
++      unlink_anon_vmas(new);
+       kmem_cache_free(vm_area_cachep, new);
+       return err;
+ }
+@@ -2415,6 +2839,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+             unsigned long addr, int new_below)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++              BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
++              if (mm->map_count >= sysctl_max_map_count-1)
++                      return -ENOMEM;
++      } else
++#endif
++
+       if (mm->map_count >= sysctl_max_map_count)
+               return -ENOMEM;
+@@ -2426,11 +2859,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+  * work.  This now handles partial unmappings.
+  * Jeremy Fitzhardinge <jeremy@goop.org>
+  */
++#ifdef CONFIG_PAX_SEGMEXEC
+ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+ {
++      int ret = __do_munmap(mm, start, len);
++      if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
++              return ret;
++
++      return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
++}
++
++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#else
++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#endif
++{
+       unsigned long end;
+       struct vm_area_struct *vma, *prev, *last;
++      /*
++       * mm->mmap_sem is required to protect against another thread
++       * changing the mappings in case we sleep.
++       */
++      verify_mm_writelocked(mm);
++
+       if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
+               return -EINVAL;
+@@ -2508,6 +2960,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+       /* Fix up all other VM information */
+       remove_vma_list(mm, vma);
++      track_exec_limit(mm, start, end, 0UL);
++
+       return 0;
+ }
+@@ -2516,6 +2970,12 @@ int vm_munmap(unsigned long start, size_t len)
+       int ret;
+       struct mm_struct *mm = current->mm;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
++          (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
++              return -EINVAL;
++#endif
++
+       if (down_write_killable(&mm->mmap_sem))
+               return -EINTR;
+@@ -2572,6 +3032,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+       vma = find_vma(mm, start);
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
++              goto out;
++#endif
++
+       if (!vma || !(vma->vm_flags & VM_SHARED))
+               goto out;
+@@ -2638,16 +3103,6 @@ out:
+       return ret;
+ }
+-static inline void verify_mm_writelocked(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_DEBUG_VM
+-      if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+-              WARN_ON(1);
+-              up_read(&mm->mmap_sem);
+-      }
+-#endif
+-}
+-
+ /*
+  *  this is really a simplified "do_mmap".  it only handles
+  *  anonymous maps.  eventually we may be able to do some
+@@ -2661,6 +3116,7 @@ static int do_brk(unsigned long addr, unsigned long request)
+       struct rb_node **rb_link, *rb_parent;
+       pgoff_t pgoff = addr >> PAGE_SHIFT;
+       int error;
++      unsigned long charged;
+       len = PAGE_ALIGN(request);
+       if (len < request)
+@@ -2670,10 +3126,24 @@ static int do_brk(unsigned long addr, unsigned long request)
+       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++      if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++              flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++              if (mm->pax_flags & MF_PAX_MPROTECT)
++                      flags &= ~VM_MAYEXEC;
++#endif
++
++      }
++#endif
++
+       error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+       if (offset_in_page(error))
+               return error;
++      charged = len >> PAGE_SHIFT;
++
+       error = mlock_future_check(mm, mm->def_flags, len);
+       if (error)
+               return error;
+@@ -2691,16 +3161,17 @@ static int do_brk(unsigned long addr, unsigned long request)
+                             &rb_parent)) {
+               if (do_munmap(mm, addr, len))
+                       return -ENOMEM;
++              BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
+       }
+       /* Check against address space limits *after* clearing old maps... */
+-      if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
++      if (!may_expand_vm(mm, flags, charged))
+               return -ENOMEM;
+       if (mm->map_count > sysctl_max_map_count)
+               return -ENOMEM;
+-      if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
++      if (security_vm_enough_memory_mm(mm, charged))
+               return -ENOMEM;
+       /* Can we just expand an old private anonymous mapping? */
+@@ -2714,7 +3185,7 @@ static int do_brk(unsigned long addr, unsigned long request)
+        */
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       if (!vma) {
+-              vm_unacct_memory(len >> PAGE_SHIFT);
++              vm_unacct_memory(charged);
+               return -ENOMEM;
+       }
+@@ -2728,11 +3199,12 @@ static int do_brk(unsigned long addr, unsigned long request)
+       vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+       perf_event_mmap(vma);
+-      mm->total_vm += len >> PAGE_SHIFT;
+-      mm->data_vm += len >> PAGE_SHIFT;
++      mm->total_vm += charged;
++      mm->data_vm += charged;
+       if (flags & VM_LOCKED)
+-              mm->locked_vm += (len >> PAGE_SHIFT);
++              mm->locked_vm += charged;
+       vma->vm_flags |= VM_SOFTDIRTY;
++      track_exec_limit(mm, addr, addr + len, flags);
+       return 0;
+ }
+@@ -2796,6 +3268,7 @@ void exit_mmap(struct mm_struct *mm)
+       while (vma) {
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += vma_pages(vma);
++              vma->vm_mirror = NULL;
+               vma = remove_vma(vma);
+       }
+       vm_unacct_memory(nr_accounted);
+@@ -2810,6 +3283,10 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+       struct vm_area_struct *prev;
+       struct rb_node **rb_link, *rb_parent;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m = NULL;
++#endif
++
+       if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                          &prev, &rb_link, &rb_parent))
+               return -ENOMEM;
+@@ -2817,6 +3294,9 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+            security_vm_enough_memory_mm(mm, vma_pages(vma)))
+               return -ENOMEM;
++      if (security_mmap_addr(vma->vm_start))
++              return -EPERM;
++
+       /*
+        * The vm_pgoff of a purely anonymous vma should be irrelevant
+        * until its first write fault, when page's anon_vma and index
+@@ -2834,7 +3314,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+               vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
++              vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++              if (!vma_m)
++                      return -ENOMEM;
++      }
++#endif
++
+       vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (vma_m)
++              BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+       return 0;
+ }
+@@ -2853,6 +3347,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+       struct rb_node **rb_link, *rb_parent;
+       bool faulted_in_anon_vma = true;
++      BUG_ON(vma->vm_mirror);
++
+       /*
+        * If anonymous vma has not yet been faulted, update new pgoff
+        * to match new location, to increase its chance of merging.
+@@ -2919,27 +3415,70 @@ out:
+       return NULL;
+ }
++#ifdef CONFIG_PAX_SEGMEXEC
++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++      struct vm_area_struct *prev_m;
++      struct rb_node **rb_link_m, *rb_parent_m;
++      struct mempolicy *pol_m;
++
++      BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++      BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++      BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++      *vma_m = *vma;
++      INIT_LIST_HEAD(&vma_m->anon_vma_chain);
++      if (anon_vma_clone(vma_m, vma))
++              return -ENOMEM;
++      pol_m = vma_policy(vma_m);
++      mpol_get(pol_m);
++      set_vma_policy(vma_m, pol_m);
++      vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++      vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++      vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++      vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++      if (vma_m->vm_file)
++              get_file(vma_m->vm_file);
++      if (vma_m->vm_ops && vma_m->vm_ops->open)
++              vma_m->vm_ops->open(vma_m);
++      BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
++      vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++      vma_m->vm_mirror = vma;
++      vma->vm_mirror = vma_m;
++      return 0;
++}
++#endif
++
+ /*
+  * Return true if the calling process may expand its vm space by the passed
+  * number of pages
+  */
+ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
+ {
++      if ((mm->total_vm + npages) > (ULONG_MAX >> PAGE_SHIFT))
++              gr_learn_resource(current, RLIMIT_AS, ULONG_MAX, 1);
++      else
++              gr_learn_resource(current, RLIMIT_AS, (mm->total_vm + npages) << PAGE_SHIFT, 1);
++
+       if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
+               return false;
+-      if (is_data_mapping(flags) &&
+-          mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
+-              /* Workaround for Valgrind */
+-              if (rlimit(RLIMIT_DATA) == 0 &&
+-                  mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
+-                      return true;
+-              if (!ignore_rlimit_data) {
+-                      pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits or use boot option ignore_rlimit_data.\n",
+-                                   current->comm, current->pid,
+-                                   (mm->data_vm + npages) << PAGE_SHIFT,
+-                                   rlimit(RLIMIT_DATA));
+-                      return false;
++      if (is_data_mapping(flags)) {
++              if ((mm->data_vm + npages) > (ULONG_MAX >> PAGE_SHIFT))
++                      gr_learn_resource(current, RLIMIT_DATA, ULONG_MAX, 1);
++              else
++                      gr_learn_resource(current, RLIMIT_DATA, (mm->data_vm + npages) << PAGE_SHIFT, 1);
++              if (mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
++                      /* Workaround for Valgrind */
++                      if (rlimit(RLIMIT_DATA) == 0 &&
++                          mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
++                              return true;
++                      if (!ignore_rlimit_data) {
++                              pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits or use boot option ignore_rlimit_data.\n",
++                                           current->comm, current->pid,
++                                           (mm->data_vm + npages) << PAGE_SHIFT,
++                                           rlimit(RLIMIT_DATA));
++                              return false;
++                      }
+               }
+       }
+@@ -2948,6 +3487,11 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
+ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
+ {
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
+       mm->total_vm += npages;
+       if (is_exec_mapping(flags))
+@@ -3042,6 +3586,22 @@ static struct vm_area_struct *__install_special_mapping(
+       vma->vm_start = addr;
+       vma->vm_end = addr + len;
++#ifdef CONFIG_PAX_MPROTECT
++      if (mm->pax_flags & MF_PAX_MPROTECT) {
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++                      return ERR_PTR(-EPERM);
++              if (!(vm_flags & VM_EXEC))
++                      vm_flags &= ~VM_MAYEXEC;
++#else
++              if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++                      vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++              else
++                      vm_flags &= ~VM_MAYWRITE;
++      }
++#endif
++
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index a4830f0..0675c13 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -25,10 +25,18 @@
+ #include <linux/perf_event.h>
+ #include <linux/ksm.h>
+ #include <linux/pkeys.h>
++#include <linux/sched/sysctl.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#include <linux/binfmts.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+ #include "internal.h"
+@@ -258,6 +266,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+       return pages;
+ }
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphor for writing except stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++      unsigned long oldlimit, newlimit = 0UL;
++
++      if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
++              return;
++
++      spin_lock(&mm->page_table_lock);
++      oldlimit = mm->context.user_cs_limit;
++      if ((prot & VM_EXEC) && oldlimit < end)
++              /* USER_CS limit moved up */
++              newlimit = end;
++      else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++              /* USER_CS limit moved down */
++              newlimit = start;
++
++      if (newlimit) {
++              mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++              wmb();
++              cpumask_clear(&mm->context.cpu_user_cs_mask);
++              cpumask_set_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask);
++#endif
++
++              set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++      }
++      spin_unlock(&mm->page_table_lock);
++      if (newlimit == end) {
++              struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++              for (; vma && vma->vm_start < end; vma = vma->vm_next)
++                      if (is_vm_hugetlb_page(vma))
++                              hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++                      else
++                              change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
++      }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+       unsigned long start, unsigned long end, unsigned long newflags)
+@@ -270,11 +320,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+       int error;
+       int dirty_accountable = 0;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct vm_area_struct *vma_m = NULL;
++      unsigned long start_m, end_m;
++
++      start_m = start + SEGMEXEC_TASK_SIZE;
++      end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+       if (newflags == oldflags) {
+               *pprev = vma;
+               return 0;
+       }
++      if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++              struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++              if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++                      return -ENOMEM;
++
++              if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++                      return -ENOMEM;
++      }
++
+       /*
+        * If we make a private mapping writable we increase our commit;
+        * but (without finer accounting) cannot reduce our commit if we
+@@ -295,6 +363,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+               }
+       }
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++              if (start != vma->vm_start) {
++                      error = split_vma(mm, vma, start, 1);
++                      if (error)
++                              goto fail;
++                      BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++                      *pprev = (*pprev)->vm_next;
++              }
++
++              if (end != vma->vm_end) {
++                      error = split_vma(mm, vma, end, 0);
++                      if (error)
++                              goto fail;
++              }
++
++              if (pax_find_mirror_vma(vma)) {
++                      error = __do_munmap(mm, start_m, end_m - start_m);
++                      if (error)
++                              goto fail;
++              } else {
++                      vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++                      if (!vma_m) {
++                              error = -ENOMEM;
++                              goto fail;
++                      }
++                      vma->vm_flags = newflags;
++                      error = pax_mirror_vma(vma_m, vma);
++                      if (error) {
++                              vma->vm_flags = oldflags;
++                              goto fail;
++                      }
++              }
++      }
++#endif
++
+       /*
+        * First try to merge with previous and/or next vma.
+        */
+@@ -326,7 +430,19 @@ success:
+        * vm_flags and vm_page_prot are protected by the mmap_sem
+        * held in write mode.
+        */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
++              pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
++#endif
++
+       vma->vm_flags = newflags;
++
++#ifdef CONFIG_PAX_MPROTECT
++      if (mm->binfmt && mm->binfmt->handle_mprotect)
++              mm->binfmt->handle_mprotect(vma, newflags);
++#endif
++
+       dirty_accountable = vma_wants_writenotify(vma);
+       vma_set_page_prot(vma);
+@@ -360,7 +476,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+       int error = -EINVAL;
+       const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
+       const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
+-                              (prot & PROT_READ);
++                              (prot & (PROT_READ | PROT_WRITE));
+       prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
+       if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
+@@ -374,6 +490,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+       end = start + len;
+       if (end <= start)
+               return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++              if (end > SEGMEXEC_TASK_SIZE)
++                      return -EINVAL;
++      } else
++#endif
++
++      if (end > TASK_SIZE)
++              return -EINVAL;
++
+       if (!arch_validate_prot(prot))
+               return -EINVAL;
+@@ -407,6 +534,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+       if (start > vma->vm_start)
+               prev = vma;
++#ifdef CONFIG_PAX_MPROTECT
++      if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
++              current->mm->binfmt->handle_mprotect(vma, calc_vm_prot_bits(prot, 0));
++#endif
++
+       for (nstart = start ; ; ) {
+               unsigned long newflags;
+               int pkey = arch_override_mprotect_pkey(vma, prot, -1);
+@@ -422,6 +554,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+               /* newflags >> 4 shift VM_MAY% in place of VM_% */
+               if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
++                      if (prot & (PROT_WRITE | PROT_EXEC))
++                              gr_log_rwxmprotect(vma);
++
++                      error = -EACCES;
++                      goto out;
++              }
++
++              if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
+                       error = -EACCES;
+                       goto out;
+               }
+@@ -436,6 +576,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+               error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
+               if (error)
+                       goto out;
++
++              track_exec_limit(current->mm, nstart, tmp, newflags);
++
+               nstart = tmp;
+               if (nstart < prev->vm_end)
+diff --git a/mm/mremap.c b/mm/mremap.c
+index da22ad2..f98a3df 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -148,6 +148,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+                       continue;
+               pte = ptep_get_and_clear(mm, old_addr, old_pte);
+               pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++              if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
++                      pte = pte_exprotect(pte);
++#endif
++
+               pte = move_soft_dirty_pte(pte);
+               set_pte_at(mm, new_addr, new_pte, pte);
+       }
+@@ -357,6 +363,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
+       if (is_vm_hugetlb_page(vma))
+               return ERR_PTR(-EINVAL);
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (pax_find_mirror_vma(vma))
++              return ERR_PTR(-EINVAL);
++#endif
++
+       /* We can't remap across vm area boundaries */
+       if (old_len > vma->vm_end - addr)
+               return ERR_PTR(-EFAULT);
+@@ -404,11 +415,19 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+       unsigned long ret = -EINVAL;
+       unsigned long charged = 0;
+       unsigned long map_flags;
++      unsigned long pax_task_size = TASK_SIZE;
+       if (offset_in_page(new_addr))
+               goto out;
+-      if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
+               goto out;
+       /* Ensure the old/new locations do not overlap */
+@@ -481,6 +500,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+       unsigned long ret = -EINVAL;
+       unsigned long charged = 0;
+       bool locked = false;
++      unsigned long pax_task_size = TASK_SIZE;
+       if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+               return ret;
+@@ -502,6 +522,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+       if (!new_len)
+               return ret;
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (mm->pax_flags & MF_PAX_SEGMEXEC)
++              pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++      pax_task_size -= PAGE_SIZE;
++
++      if (new_len > pax_task_size || addr > pax_task_size-new_len ||
++          old_len > pax_task_size || addr > pax_task_size-old_len)
++              return ret;
++
+       if (down_write_killable(&current->mm->mmap_sem))
+               return -EINTR;
+@@ -553,6 +584,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+                               new_addr = addr;
+                       }
+                       ret = addr;
++                      track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+                       goto out;
+               }
+       }
+@@ -576,7 +608,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+                       goto out;
+               }
++              map_flags = vma->vm_flags;
+               ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
++              if (!(ret & ~PAGE_MASK)) {
++                      track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++                      track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++              }
+       }
+ out:
+       if (offset_in_page(ret)) {
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 95daf81..559c30b 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -48,7 +48,6 @@ unsigned long max_mapnr;
+ EXPORT_SYMBOL(max_mapnr);
+ unsigned long highest_memmap_pfn;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+ atomic_long_t mmap_pages_allocated;
+@@ -836,15 +835,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ EXPORT_SYMBOL(find_vma);
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
+- */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+-{
+-      return find_vma(mm, addr);
+-}
+-
+-/*
+  * expand a stack to a given address
+  * - not supported under NOMMU conditions
+  */
+@@ -1509,6 +1499,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+       /* most fields are the same, copy all, and then fixup */
+       *new = *vma;
++      INIT_LIST_HEAD(&new->anon_vma_chain);
+       *region = *vma->vm_region;
+       new->vm_region = region;
+@@ -1816,8 +1807,8 @@ void filemap_map_pages(struct fault_env *fe,
+ }
+ EXPORT_SYMBOL(filemap_map_pages);
+-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+-              unsigned long addr, void *buf, int len, int write)
++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
++              unsigned long addr, void *buf, size_t len, int write)
+ {
+       struct vm_area_struct *vma;
+@@ -1858,8 +1849,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+  *
+  * The caller must hold a reference on @mm.
+  */
+-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+-              void *buf, int len, int write)
++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++              void *buf, size_t len, int write)
+ {
+       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ }
+@@ -1868,7 +1859,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+  * Access another process' address space.
+  * - source/target buffer must be kernel space
+  */
+-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
+ {
+       struct mm_struct *mm;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index f4cd7d8..982c35d 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -902,7 +902,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
+  *   card's wb_dirty may rush to many times higher than wb_setpoint.
+  * - the wb dirty thresh drops quickly due to change of JBOD workload
+  */
+-static void wb_position_ratio(struct dirty_throttle_control *dtc)
++static void __intentional_overflow(-1) wb_position_ratio(struct dirty_throttle_control *dtc)
+ {
+       struct bdi_writeback *wb = dtc->wb;
+       unsigned long write_bw = wb->avg_write_bandwidth;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a2214c6..72191b7 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -64,6 +64,7 @@
+ #include <linux/page_owner.h>
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
++#include <linux/random.h>
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -676,7 +677,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
+               __mod_zone_freepage_state(zone, (1 << order), migratetype);
+ }
+ #else
+-struct page_ext_operations debug_guardpage_ops = { NULL, };
++struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
+ static inline void set_page_guard(struct zone *zone, struct page *page,
+                               unsigned int order, int migratetype) {}
+ static inline void clear_page_guard(struct zone *zone, struct page *page,
+@@ -979,6 +980,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ {
+       int bad = 0;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      unsigned long index = 1UL << order;
++#endif
++
+       VM_BUG_ON_PAGE(PageTail(page), page);
+       trace_mm_page_free(page, order);
+@@ -1025,6 +1030,12 @@ static __always_inline bool free_pages_prepare(struct page *page,
+               debug_check_no_obj_freed(page_address(page),
+                                          PAGE_SIZE << order);
+       }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      for (; index; --index)
++              sanitize_highpage(page + index - 1);
++#endif
++
+       arch_free_page(page, order);
+       kernel_poison_pages(page, 1 << order, 0);
+       kernel_map_pages(page, 1 << order, 0);
+@@ -1234,6 +1245,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+       local_irq_restore(flags);
+ }
++bool __meminitdata extra_latent_entropy;
++
++static int __init setup_pax_extra_latent_entropy(char *str)
++{
++      extra_latent_entropy = true;
++      return 0;
++}
++early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
++
++#ifdef LATENT_ENTROPY_PLUGIN
++volatile unsigned long latent_entropy __latent_entropy;
++EXPORT_SYMBOL(latent_entropy);
++#endif
++
+ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+ {
+       unsigned int nr_pages = 1 << order;
+@@ -1249,6 +1274,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+       __ClearPageReserved(p);
+       set_page_count(p, 0);
++      if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
++              unsigned long hash = 0;
++              size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
++              const unsigned long *data = lowmem_page_address(page);
++
++              for (index = 0; index < end; index++)
++                      hash ^= hash + data[index];
++#ifdef LATENT_ENTROPY_PLUGIN
++              latent_entropy ^= hash;
++              add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++#else
++              add_device_randomness((const void *)&hash, sizeof(hash));
++#endif
++      }
++
+       page_zone(page)->managed_pages += nr_pages;
+       set_page_refcounted(page);
+       __free_pages(page, order);
+@@ -1305,7 +1345,6 @@ static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
+ }
+ #endif
+-
+ void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+                                                       unsigned int order)
+ {
+@@ -1678,8 +1717,8 @@ static inline int check_new_page(struct page *page)
+ static inline bool free_pages_prezeroed(bool poisoned)
+ {
+-      return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
+-              page_poisoning_enabled() && poisoned;
++      return IS_ENABLED(CONFIG_PAX_MEMORY_SANITIZE) ||
++              (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled() && poisoned);
+ }
+ #ifdef CONFIG_DEBUG_VM
+@@ -1735,11 +1774,13 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
+       int i;
+       bool poisoned = true;
++#ifndef CONFIG_PAX_MEMORY_SANITIZE
+       for (i = 0; i < (1 << order); i++) {
+               struct page *p = page + i;
+               if (poisoned)
+                       poisoned &= page_is_poisoned(p);
+       }
++#endif
+       post_alloc_hook(page, order, gfp_flags);
+@@ -2278,8 +2319,9 @@ static void drain_pages(unsigned int cpu)
+  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
+  * the single zone's pages.
+  */
+-void drain_local_pages(struct zone *zone)
++void drain_local_pages(void *_zone)
+ {
++      struct zone *zone = _zone;
+       int cpu = smp_processor_id();
+       if (zone)
+@@ -2339,8 +2381,7 @@ void drain_all_pages(struct zone *zone)
+               else
+                       cpumask_clear_cpu(cpu, &cpus_with_pcps);
+       }
+-      on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
+-                                                              zone, 1);
++      on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, zone, 1);
+ }
+ #ifdef CONFIG_HIBERNATION
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 9903830..5176325 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -133,7 +133,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
+ static unsigned int pcpu_high_unit_cpu __read_mostly;
+ /* the address of the first chunk which starts with the kernel static area */
+-void *pcpu_base_addr __read_mostly;
++void *pcpu_base_addr __read_only;
+ EXPORT_SYMBOL_GPL(pcpu_base_addr);
+ static const int *pcpu_unit_map __read_mostly;                /* cpu -> unit */
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index 07514d4..9989090 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -13,6 +13,7 @@
+ #include <linux/uio.h>
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/slab.h>
+ #include <linux/syscalls.h>
+@@ -159,19 +160,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
+       ssize_t iov_len;
+       size_t total_len = iov_iter_count(iter);
++      return -ENOSYS; // PaX: until properly audited
++
+       /*
+        * Work out how many pages of struct pages we're going to need
+        * when eventually calling get_user_pages
+        */
+       for (i = 0; i < riovcnt; i++) {
+               iov_len = rvec[i].iov_len;
+-              if (iov_len > 0) {
+-                      nr_pages_iov = ((unsigned long)rvec[i].iov_base
+-                                      + iov_len)
+-                              / PAGE_SIZE - (unsigned long)rvec[i].iov_base
+-                              / PAGE_SIZE + 1;
+-                      nr_pages = max(nr_pages, nr_pages_iov);
+-              }
++              if (iov_len <= 0)
++                      continue;
++              nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
++                              (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
++              nr_pages = max(nr_pages, nr_pages_iov);
+       }
+       if (nr_pages == 0)
+@@ -199,6 +200,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
+               goto free_proc_pages;
+       }
++      if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
++              rc = -EPERM;
++              goto put_task_struct;
++      }
++
+       mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+       if (!mm || IS_ERR(mm)) {
+               rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+diff --git a/mm/readahead.c b/mm/readahead.c
+index c8a955b..fad2128 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -81,7 +81,7 @@ static void read_cache_pages_invalidate_pages(struct address_space *mapping,
+  * Hides the details of the LRU cache etc from the filesystems.
+  */
+ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
+-                      int (*filler)(void *, struct page *), void *data)
++                      filler_t *filler, void *data)
+ {
+       struct page *page;
+       int ret = 0;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 1ef3640..88c345d 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -172,6 +172,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+       struct anon_vma *anon_vma = vma->anon_vma;
+       struct anon_vma_chain *avc;
++#ifdef CONFIG_PAX_SEGMEXEC
++      struct anon_vma_chain *avc_m = NULL;
++#endif
++
+       might_sleep();
+       if (unlikely(!anon_vma)) {
+               struct mm_struct *mm = vma->vm_mm;
+@@ -181,6 +185,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+               if (!avc)
+                       goto out_enomem;
++#ifdef CONFIG_PAX_SEGMEXEC
++              avc_m = anon_vma_chain_alloc(GFP_KERNEL);
++              if (!avc_m)
++                      goto out_enomem_free_avc;
++#endif
++
+               anon_vma = find_mergeable_anon_vma(vma);
+               allocated = NULL;
+               if (!anon_vma) {
+@@ -194,6 +204,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+               /* page_table_lock to protect against threads */
+               spin_lock(&mm->page_table_lock);
+               if (likely(!vma->anon_vma)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++                      struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
++
++                      if (vma_m) {
++                              BUG_ON(vma_m->anon_vma);
++                              vma_m->anon_vma = anon_vma;
++                              anon_vma_chain_link(vma_m, avc_m, anon_vma);
++                              anon_vma->degree++;
++                              avc_m = NULL;
++                      }
++#endif
++
+                       vma->anon_vma = anon_vma;
+                       anon_vma_chain_link(vma, avc, anon_vma);
+                       /* vma reference or self-parent link for new root */
+@@ -206,12 +229,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+               if (unlikely(allocated))
+                       put_anon_vma(allocated);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++              if (unlikely(avc_m))
++                      anon_vma_chain_free(avc_m);
++#endif
++
+               if (unlikely(avc))
+                       anon_vma_chain_free(avc);
+       }
+       return 0;
+  out_enomem_free_avc:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++      if (avc_m)
++              anon_vma_chain_free(avc_m);
++#endif
++
+       anon_vma_chain_free(avc);
+  out_enomem:
+       return -ENOMEM;
+@@ -255,7 +290,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
+  * good chance of avoiding scanning the whole hierarchy when it searches where
+  * page is mapped.
+  */
+-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
+ {
+       struct anon_vma_chain *avc, *pavc;
+       struct anon_vma *root = NULL;
+@@ -309,7 +344,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+  * the corresponding VMA in the parent process is attached to.
+  * Returns 0 on success, non-zero on failure.
+  */
+-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
+ {
+       struct anon_vma_chain *avc;
+       struct anon_vma *anon_vma;
+@@ -429,10 +464,10 @@ static void anon_vma_ctor(void *data)
+ void __init anon_vma_init(void)
+ {
+       anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
+-                      0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
++                      0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE,
+                       anon_vma_ctor);
+       anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
+-                      SLAB_PANIC|SLAB_ACCOUNT);
++                      SLAB_PANIC|SLAB_ACCOUNT|SLAB_NO_SANITIZE);
+ }
+ /*
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 971fc83..6afaf44 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -34,7 +34,7 @@
+ #include <linux/uio.h>
+ #include <linux/khugepaged.h>
+-static struct vfsmount *shm_mnt;
++struct vfsmount *shm_mnt;
+ #ifdef CONFIG_SHMEM
+ /*
+@@ -83,7 +83,7 @@ static struct vfsmount *shm_mnt;
+ #define BOGO_DIRENT_SIZE 20
+ /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+-#define SHORT_SYMLINK_LEN 128
++#define SHORT_SYMLINK_LEN 64
+ /*
+  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+@@ -3255,6 +3255,24 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+       return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ }
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++static int shmem_user_xattr_handler_set(const struct xattr_handler *handler,
++                                      struct dentry *dentry, struct inode *inode,
++                                      const char *name, const void *value,
++                                      size_t size, int flags)
++{
++      struct shmem_inode_info *info = SHMEM_I(inode);
++
++      if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++              return -EOPNOTSUPP;
++      if (size > 8)
++              return -EINVAL;
++
++      name = xattr_full_name(handler, name);
++      return simple_xattr_set(&info->xattrs, name, value, size, flags);
++}
++#endif
++
+ static const struct xattr_handler shmem_security_xattr_handler = {
+       .prefix = XATTR_SECURITY_PREFIX,
+       .get = shmem_xattr_handler_get,
+@@ -3267,6 +3285,14 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
+       .set = shmem_xattr_handler_set,
+ };
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++static const struct xattr_handler shmem_user_xattr_handler = {
++      .prefix = XATTR_USER_PREFIX,
++      .get = shmem_xattr_handler_get,
++      .set = shmem_user_xattr_handler_set,
++};
++#endif
++
+ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #ifdef CONFIG_TMPFS_POSIX_ACL
+       &posix_acl_access_xattr_handler,
+@@ -3274,6 +3300,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #endif
+       &shmem_security_xattr_handler,
+       &shmem_trusted_xattr_handler,
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++      &shmem_user_xattr_handler,
++#endif
++
+       NULL
+ };
+@@ -3653,8 +3684,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+       int err = -ENOMEM;
+       /* Round up to L1_CACHE_BYTES to resist false sharing */
+-      sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
+-                              L1_CACHE_BYTES), GFP_KERNEL);
++      sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
+       if (!sbinfo)
+               return -ENOMEM;
+diff --git a/mm/slab.c b/mm/slab.c
+index b672710..9ebcec1 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -116,6 +116,7 @@
+ #include      <linux/kmemcheck.h>
+ #include      <linux/memory.h>
+ #include      <linux/prefetch.h>
++#include      <linux/vmalloc.h>
+ #include      <net/sock.h>
+@@ -284,10 +285,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
+               if ((x)->max_freeable < i)                              \
+                       (x)->max_freeable = i;                          \
+       } while (0)
+-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
+-#define STATS_INC_ALLOCMISS(x)        atomic_inc(&(x)->allocmiss)
+-#define STATS_INC_FREEHIT(x)  atomic_inc(&(x)->freehit)
+-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x)        atomic_inc_unchecked(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x)  atomic_inc_unchecked(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
++#define STATS_INC_SANITIZED(x)        atomic_inc_unchecked(&(x)->sanitized)
++#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
+ #else
+ #define       STATS_INC_ACTIVE(x)     do { } while (0)
+ #define       STATS_DEC_ACTIVE(x)     do { } while (0)
+@@ -304,6 +307,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
+ #define STATS_INC_ALLOCMISS(x)        do { } while (0)
+ #define STATS_INC_FREEHIT(x)  do { } while (0)
+ #define STATS_INC_FREEMISS(x) do { } while (0)
++#define STATS_INC_SANITIZED(x)        do { } while (0)
++#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
+ #endif
+ #if DEBUG
+@@ -410,7 +415,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
+  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
+  */
+ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+-                                      const struct page *page, void *obj)
++                                      const struct page *page, const void *obj)
+ {
+       u32 offset = (obj - page->s_mem);
+       return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+@@ -1290,7 +1295,7 @@ void __init kmem_cache_init(void)
+       create_boot_cache(kmem_cache, "kmem_cache",
+               offsetof(struct kmem_cache, node) +
+                                 nr_node_ids * sizeof(struct kmem_cache_node *),
+-                                SLAB_HWCACHE_ALIGN);
++                                SLAB_HWCACHE_ALIGN, 0, 0);
+       list_add(&kmem_cache->list, &slab_caches);
+       slab_state = PARTIAL;
+@@ -1298,8 +1303,8 @@ void __init kmem_cache_init(void)
+        * Initialize the caches that provide memory for the  kmem_cache_node
+        * structures first.  Without this, further allocations will bug.
+        */
+-      kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
+-                              kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
++      kmalloc_caches[INDEX_NODE] = create_kmalloc_cache_usercopy("kmalloc-node",
++                              kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS, 0, kmalloc_size(INDEX_NODE));
+       slab_state = PARTIAL_NODE;
+       setup_kmalloc_cache_index_table();
+@@ -1544,7 +1549,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
+               while (!kstack_end(sptr)) {
+                       svalue = *sptr++;
+-                      if (kernel_text_address(svalue)) {
++                      if (kernel_text_address(ktva_ktla(svalue))) {
+                               *addr++ = svalue;
+                               size -= sizeof(unsigned long);
+                               if (size <= sizeof(unsigned long))
+@@ -1931,7 +1936,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+       cachep = find_mergeable(size, align, flags, name, ctor);
+       if (cachep) {
+-              cachep->refcount++;
++              atomic_inc(&cachep->refcount);
+               /*
+                * Adjust the object sizes so that we clear
+@@ -2060,6 +2065,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ #endif
+ #endif
++      flags = pax_sanitize_slab_flags(flags);
++
+       /*
+        * Check that size is in terms of words.  This is needed to avoid
+        * unaligned accesses for some archs when redzoning is used, and makes
+@@ -3524,6 +3531,20 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
+       struct array_cache *ac = cpu_cache_get(cachep);
+       check_irq_off();
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
++              STATS_INC_NOT_SANITIZED(cachep);
++      else {
++              memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
++
++              if (cachep->ctor)
++                      cachep->ctor(objp);
++
++              STATS_INC_SANITIZED(cachep);
++      }
++#endif
++
+       kmemleak_free_recursive(objp, cachep->flags);
+       objp = cache_free_debugcheck(cachep, objp, caller);
+@@ -3703,7 +3724,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
+       return ret;
+ }
+-void *__kmalloc_node(size_t size, gfp_t flags, int node)
++void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+       return __do_kmalloc_node(size, flags, node, _RET_IP_);
+ }
+@@ -3723,7 +3744,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
+  * @flags: the type of memory to allocate (see kmalloc).
+  * @caller: function caller for debug tracking of the caller
+  */
+-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
++static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
+                                         unsigned long caller)
+ {
+       struct kmem_cache *cachep;
+@@ -3823,6 +3844,7 @@ void kfree(const void *objp)
+       if (unlikely(ZERO_OR_NULL_PTR(objp)))
+               return;
++      VM_BUG_ON(!virt_addr_valid(objp));
+       local_irq_save(flags);
+       kfree_debugcheck(objp);
+       c = virt_to_cache(objp);
+@@ -4190,14 +4212,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+       }
+       /* cpu stats */
+       {
+-              unsigned long allochit = atomic_read(&cachep->allochit);
+-              unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+-              unsigned long freehit = atomic_read(&cachep->freehit);
+-              unsigned long freemiss = atomic_read(&cachep->freemiss);
++              unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
++              unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
++              unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
++              unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
+               seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
+                          allochit, allocmiss, freehit, freemiss);
+       }
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      {
++              unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
++              unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
++
++              seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
++      }
++#endif
+ #endif
+ }
+@@ -4434,16 +4464,48 @@ static const struct file_operations proc_slabstats_operations = {
+ static int __init slab_proc_init(void)
+ {
+ #ifdef CONFIG_DEBUG_SLAB_LEAK
+-      proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
++      proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
+ #endif
+       return 0;
+ }
+ module_init(slab_proc_init);
+ #endif
++bool is_usercopy_object(const void *ptr)
++{
++      struct page *page;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return false;
++
++      if (!slab_is_available())
++              return false;
++
++      if (is_vmalloc_addr(ptr)
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++          && !object_starts_on_stack(ptr)
++#endif
++      ) {
++              struct vm_struct *vm = find_vm_area(ptr);
++              if (vm && (vm->flags & VM_USERCOPY))
++                      return true;
++              return false;
++      }
++
++      if (!virt_addr_valid(ptr))
++              return false;
++
++      page = virt_to_head_page(ptr);
++
++      if (!PageSlab(page))
++              return false;
++
++      return !!page->slab_cache->usersize;
++}
++
+ #ifdef CONFIG_HARDENED_USERCOPY
+ /*
+- * Rejects objects that are incorrectly sized.
++ * Detect unwanted object access
+  *
+  * Returns NULL if check passes, otherwise const char * to name of cache
+  * to indicate an error.
+@@ -4457,17 +4519,23 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
+       /* Find and validate object. */
+       cachep = page->slab_cache;
+-      objnr = obj_to_index(cachep, page, (void *)ptr);
++
++      objnr = obj_to_index(cachep, page, ptr);
+       BUG_ON(objnr >= cachep->num);
+       /* Find offset within object. */
+       offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+-      /* Allow address range falling entirely within object size. */
+-      if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+-              return NULL;
++      if (offset < cachep->useroffset)
++              return cachep->name;
+-      return cachep->name;
++      if (offset - cachep->useroffset >= cachep->usersize)
++              return cachep->name;
++
++      if (n > cachep->useroffset - offset + cachep->usersize)
++              return cachep->name;
++
++      return NULL;
+ }
+ #endif /* CONFIG_HARDENED_USERCOPY */
+diff --git a/mm/slab.h b/mm/slab.h
+index 9653f2e..9b9e8cd 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -21,8 +21,10 @@ struct kmem_cache {
+       unsigned int size;      /* The aligned/padded/added on size  */
+       unsigned int align;     /* Alignment as calculated */
+       unsigned long flags;    /* Active flags on the slab */
++      size_t useroffset;      /* USERCOPY region offset */
++      size_t usersize;        /* USERCOPY region size */
+       const char *name;       /* Slab name for sysfs */
+-      int refcount;           /* Use counter */
++      atomic_t refcount;      /* Use counter */
+       void (*ctor)(void *);   /* Called on object slot creation */
+       struct list_head list;  /* List of all slab caches on the system */
+ };
+@@ -71,6 +73,35 @@ extern struct list_head slab_caches;
+ /* The slab cache that manages slab cache information */
+ extern struct kmem_cache *kmem_cache;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++#ifdef CONFIG_X86_64
++#define PAX_MEMORY_SANITIZE_VALUE     '\xfe'
++#else
++#define PAX_MEMORY_SANITIZE_VALUE     '\xff'
++#endif
++enum pax_sanitize_mode {
++      PAX_SANITIZE_SLAB_OFF = 0,
++      PAX_SANITIZE_SLAB_FAST,
++      PAX_SANITIZE_SLAB_FULL,
++};
++
++extern enum pax_sanitize_mode pax_sanitize_slab;
++
++static inline unsigned long pax_sanitize_slab_flags(unsigned long flags)
++{
++      if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++              flags |= SLAB_NO_SANITIZE;
++      else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++              flags &= ~SLAB_NO_SANITIZE;
++      return flags;
++}
++#else
++static inline unsigned long pax_sanitize_slab_flags(unsigned long flags)
++{
++      return flags;
++}
++#endif
++
+ unsigned long calculate_alignment(unsigned long flags,
+               unsigned long align, unsigned long size);
+@@ -89,8 +120,11 @@ extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
+                       unsigned long flags);
++extern struct kmem_cache *create_kmalloc_cache_usercopy(const char *name, size_t size,
++                      unsigned long flags, size_t useroffset, size_t usersize);
+ extern void create_boot_cache(struct kmem_cache *, const char *name,
+-                      size_t size, unsigned long flags);
++                      size_t size, unsigned long flags,
++                      size_t useroffset, size_t usersize);
+ int slab_unmergeable(struct kmem_cache *s);
+ struct kmem_cache *find_mergeable(size_t size, size_t align,
+@@ -120,7 +154,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
+ /* Legal flag mask for kmem_cache_create(), for various configurations */
+ #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+-                       SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
++                       SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_NO_SANITIZE)
+ #if defined(CONFIG_DEBUG_SLAB)
+ #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+@@ -345,6 +379,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+               return s;
+       page = virt_to_head_page(x);
++
++      BUG_ON(!PageSlab(page));
++
+       cachep = page->slab_cache;
+       if (slab_equal_or_root(cachep, s))
+               return cachep;
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 71f0b28..83ad94c 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -25,11 +25,35 @@
+ #include "slab.h"
+-enum slab_state slab_state;
++enum slab_state slab_state __read_only;
+ LIST_HEAD(slab_caches);
+ DEFINE_MUTEX(slab_mutex);
+ struct kmem_cache *kmem_cache;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
++static int __init pax_sanitize_slab_setup(char *str)
++{
++      if (!str)
++              return 0;
++
++      if (!strcmp(str, "0") || !strcmp(str, "off")) {
++              pr_info("PaX slab sanitization: %s\n", "disabled");
++              pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
++      } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
++              pr_info("PaX slab sanitization: %s\n", "fast");
++              pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
++      } else if (!strcmp(str, "full")) {
++              pr_info("PaX slab sanitization: %s\n", "full");
++              pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
++      } else
++              pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
++
++      return 0;
++}
++early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
++#endif
++
+ /*
+  * Set of flags that will prevent slab merging
+  */
+@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
+  * Merge control. If this is set then no merging of slab caches will occur.
+  * (Could be removed. This was introduced to pacify the merge skeptics.)
+  */
+-static int slab_nomerge;
++static int slab_nomerge __read_only = 1;
+ static int __init setup_slab_nomerge(char *str)
+ {
+@@ -244,7 +268,7 @@ int slab_unmergeable(struct kmem_cache *s)
+       /*
+        * We may have set a slab to be unmergeable during bootstrap.
+        */
+-      if (s->refcount < 0)
++      if (atomic_read(&s->refcount) < 0)
+               return 1;
+       return 0;
+@@ -323,12 +347,15 @@ unsigned long calculate_alignment(unsigned long flags,
+ static struct kmem_cache *create_cache(const char *name,
+               size_t object_size, size_t size, size_t align,
+-              unsigned long flags, void (*ctor)(void *),
++              unsigned long flags, size_t useroffset,
++              size_t usersize, void (*ctor)(void *),
+               struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+ {
+       struct kmem_cache *s;
+       int err;
++      BUG_ON(useroffset + usersize > object_size);
++
+       err = -ENOMEM;
+       s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+       if (!s)
+@@ -339,6 +366,8 @@ static struct kmem_cache *create_cache(const char *name,
+       s->size = size;
+       s->align = align;
+       s->ctor = ctor;
++      s->useroffset = useroffset;
++      s->usersize = usersize;
+       err = init_memcg_params(s, memcg, root_cache);
+       if (err)
+@@ -348,7 +377,7 @@ static struct kmem_cache *create_cache(const char *name,
+       if (err)
+               goto out_free_cache;
+-      s->refcount = 1;
++      atomic_set(&s->refcount, 1);
+       list_add(&s->list, &slab_caches);
+ out:
+       if (err)
+@@ -362,11 +391,13 @@ out_free_cache:
+ }
+ /*
+- * kmem_cache_create - Create a cache.
++ * __kmem_cache_create_usercopy - Create a cache.
+  * @name: A string which is used in /proc/slabinfo to identify this cache.
+  * @size: The size of objects to be created in this cache.
+  * @align: The required alignment for the objects.
+  * @flags: SLAB flags
++ * @useroffset: USERCOPY region offset
++ * @usersize: USERCOPY region size
+  * @ctor: A constructor for the objects.
+  *
+  * Returns a ptr to the cache on success, NULL on failure.
+@@ -385,9 +416,10 @@ out_free_cache:
+  * cacheline.  This can be beneficial if you're counting cycles as closely
+  * as davem.
+  */
+-struct kmem_cache *
+-kmem_cache_create(const char *name, size_t size, size_t align,
+-                unsigned long flags, void (*ctor)(void *))
++static struct kmem_cache *
++__kmem_cache_create_usercopy(const char *name, size_t size, size_t align,
++                unsigned long flags, size_t useroffset, size_t usersize,
++                void (*ctor)(void *))
+ {
+       struct kmem_cache *s = NULL;
+       const char *cache_name;
+@@ -412,7 +444,10 @@ kmem_cache_create(const char *name, size_t size, size_t align,
+        */
+       flags &= CACHE_CREATE_MASK;
+-      s = __kmem_cache_alias(name, size, align, flags, ctor);
++      BUG_ON(!usersize && useroffset);
++      BUG_ON(size < usersize || size - usersize < useroffset);
++      if (!usersize)
++              s = __kmem_cache_alias(name, size, align, flags, ctor);
+       if (s)
+               goto out_unlock;
+@@ -424,7 +459,7 @@ kmem_cache_create(const char *name, size_t size, size_t align,
+       s = create_cache(cache_name, size, size,
+                        calculate_alignment(flags, align, size),
+-                       flags, ctor, NULL, NULL);
++                       flags, useroffset, usersize, ctor, NULL, NULL);
+       if (IS_ERR(s)) {
+               err = PTR_ERR(s);
+               kfree_const(cache_name);
+@@ -450,8 +485,25 @@ out_unlock:
+       }
+       return s;
+ }
++
++struct kmem_cache *
++kmem_cache_create(const char *name, size_t size, size_t align,
++                unsigned long flags, void (*ctor)(void *))
++{
++      return __kmem_cache_create_usercopy(name, size, align, flags, 0,
++              (flags & SLAB_USERCOPY) ? size : 0, ctor);
++}
+ EXPORT_SYMBOL(kmem_cache_create);
++struct kmem_cache *
++kmem_cache_create_usercopy(const char *name, size_t size, size_t align,
++                unsigned long flags, size_t useroffset, size_t usersize,
++                void (*ctor)(void *))
++{
++      return __kmem_cache_create_usercopy(name, size, align, flags, useroffset, usersize, ctor);
++}
++EXPORT_SYMBOL(kmem_cache_create_usercopy);
++
+ static int shutdown_cache(struct kmem_cache *s,
+               struct list_head *release, bool *need_rcu_barrier)
+ {
+@@ -473,7 +525,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier)
+               rcu_barrier();
+       list_for_each_entry_safe(s, s2, release, list) {
+-#ifdef SLAB_SUPPORTS_SYSFS
++#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+               sysfs_slab_remove(s);
+ #else
+               slab_kmem_cache_release(s);
+@@ -533,7 +585,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
+       s = create_cache(cache_name, root_cache->object_size,
+                        root_cache->size, root_cache->align,
+-                       root_cache->flags, root_cache->ctor,
++                       root_cache->flags, root_cache->useroffset,
++                       root_cache->usersize, root_cache->ctor,
+                        memcg, root_cache);
+       /*
+        * If we could not create a memcg cache, do not complain, because
+@@ -718,8 +771,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+       kasan_cache_destroy(s);
+       mutex_lock(&slab_mutex);
+-      s->refcount--;
+-      if (s->refcount)
++      if (!atomic_dec_and_test(&s->refcount))
+               goto out_unlock;
+       err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
+@@ -770,13 +822,15 @@ bool slab_is_available(void)
+ #ifndef CONFIG_SLOB
+ /* Create a cache during boot when no slab services are available yet */
+ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
+-              unsigned long flags)
++              unsigned long flags, size_t useroffset, size_t usersize)
+ {
+       int err;
+       s->name = name;
+       s->size = s->object_size = size;
+       s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
++      s->useroffset = useroffset;
++      s->usersize = usersize;
+       slab_init_memcg_params(s);
+@@ -786,23 +840,29 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
+               panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
+                                       name, size, err);
+-      s->refcount = -1;       /* Exempt from merging for now */
++      atomic_set(&s->refcount, -1);   /* Exempt from merging for now */
+ }
+-struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+-                              unsigned long flags)
++struct kmem_cache *__init create_kmalloc_cache_usercopy(const char *name, size_t size,
++                              unsigned long flags, size_t useroffset, size_t usersize)
+ {
+       struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+       if (!s)
+               panic("Out of memory when creating slab %s\n", name);
+-      create_boot_cache(s, name, size, flags);
++      create_boot_cache(s, name, size, flags, useroffset, usersize);
+       list_add(&s->list, &slab_caches);
+-      s->refcount = 1;
++      atomic_set(&s->refcount, 1);
+       return s;
+ }
++struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
++                              unsigned long flags)
++{
++      return create_kmalloc_cache_usercopy(name, size, flags, 0, 0);
++}
++
+ struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+ EXPORT_SYMBOL(kmalloc_caches);
+@@ -811,6 +871,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+ EXPORT_SYMBOL(kmalloc_dma_caches);
+ #endif
++#ifdef CONFIG_PAX_USERCOPY
++struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
++EXPORT_SYMBOL(kmalloc_usercopy_caches);
++#endif
++
+ /*
+  * Conversion table for small slabs sizes / 8 to the index in the
+  * kmalloc array. This is necessary for slabs < 192 since we have non power
+@@ -875,6 +940,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+               return kmalloc_dma_caches[index];
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY
++      if (unlikely((flags & GFP_USERCOPY)))
++              return kmalloc_usercopy_caches[index];
++
++#endif
++
+       return kmalloc_caches[index];
+ }
+@@ -952,8 +1024,8 @@ void __init setup_kmalloc_cache_index_table(void)
+ static void __init new_kmalloc_cache(int idx, unsigned long flags)
+ {
+-      kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
+-                                      kmalloc_info[idx].size, flags);
++      kmalloc_caches[idx] = create_kmalloc_cache_usercopy(kmalloc_info[idx].name,
++                                      kmalloc_info[idx].size, flags, 0, kmalloc_info[idx].size);
+ }
+ /*
+@@ -998,6 +1070,23 @@ void __init create_kmalloc_caches(unsigned long flags)
+               }
+       }
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY
++      for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
++              struct kmem_cache *s = kmalloc_caches[i];
++
++              if (s) {
++                      int size = kmalloc_size(i);
++                      char *n = kasprintf(GFP_NOWAIT,
++                               "usercopy-kmalloc-%d", size);
++
++                      BUG_ON(!n);
++                      kmalloc_usercopy_caches[i] = create_kmalloc_cache_usercopy(n,
++                              size, flags, 0, size);
++              }
++      }
++#endif
++
+ }
+ #endif /* !CONFIG_SLOB */
+@@ -1013,6 +1102,12 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+       flags |= __GFP_COMP;
+       page = alloc_pages(flags, order);
++#ifdef CONFIG_SLOB
++      if (page) {
++              page->private = 1UL << order;
++              __SetPageSlab(page);
++      }
++#endif
+       ret = page ? page_address(page) : NULL;
+       kmemleak_alloc(ret, size, 1, flags);
+       kasan_kmalloc_large(ret, size, flags);
+@@ -1102,6 +1197,9 @@ static void print_slabinfo_header(struct seq_file *m)
+ #ifdef CONFIG_DEBUG_SLAB
+       seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+       seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      seq_puts(m, " : pax <sanitized> <not_sanitized>");
++#endif
+ #endif
+       seq_putc(m, '\n');
+ }
+@@ -1231,7 +1329,7 @@ static int __init slab_proc_init(void)
+ module_init(slab_proc_init);
+ #endif /* CONFIG_SLABINFO */
+-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
++static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
+                                          gfp_t flags)
+ {
+       void *ret;
+diff --git a/mm/slob.c b/mm/slob.c
+index 5ec1580..eea07f2 100644
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -67,6 +67,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/list.h>
+ #include <linux/kmemleak.h>
++#include <linux/vmalloc.h>
+ #include <trace/events/kmem.h>
+@@ -157,7 +158,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
+ /*
+  * Return the size of a slob block.
+  */
+-static slobidx_t slob_units(slob_t *s)
++static slobidx_t slob_units(const slob_t *s)
+ {
+       if (s->units > 0)
+               return s->units;
+@@ -167,7 +168,7 @@ static slobidx_t slob_units(slob_t *s)
+ /*
+  * Return the next free slob block pointer after this one.
+  */
+-static slob_t *slob_next(slob_t *s)
++static slob_t *slob_next(const slob_t *s)
+ {
+       slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
+       slobidx_t next;
+@@ -182,14 +183,14 @@ static slob_t *slob_next(slob_t *s)
+ /*
+  * Returns true if s is the last free block in its page.
+  */
+-static int slob_last(slob_t *s)
++static int slob_last(const slob_t *s)
+ {
+       return !((unsigned long)slob_next(s) & ~PAGE_MASK);
+ }
+-static void *slob_new_pages(gfp_t gfp, int order, int node)
++static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
+ {
+-      void *page;
++      struct page *page;
+ #ifdef CONFIG_NUMA
+       if (node != NUMA_NO_NODE)
+@@ -201,14 +202,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
+       if (!page)
+               return NULL;
+-      return page_address(page);
++      __SetPageSlab(page);
++      return page;
+ }
+-static void slob_free_pages(void *b, int order)
++static void slob_free_pages(struct page *sp, int order)
+ {
+       if (current->reclaim_state)
+               current->reclaim_state->reclaimed_slab += 1 << order;
+-      free_pages((unsigned long)b, order);
++      __ClearPageSlab(sp);
++      page_mapcount_reset(sp);
++      sp->private = 0;
++      __free_pages(sp, order);
+ }
+ /*
+@@ -253,6 +258,7 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align)
+                       }
+                       sp->units -= units;
++                      BUG_ON(sp->units < 0);
+                       if (!sp->units)
+                               clear_slob_page_free(sp);
+                       return cur;
+@@ -313,15 +319,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+       /* Not enough space: must allocate a new page */
+       if (!b) {
+-              b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
+-              if (!b)
++              sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
++              if (!sp)
+                       return NULL;
+-              sp = virt_to_page(b);
+-              __SetPageSlab(sp);
++              b = page_address(sp);
+               spin_lock_irqsave(&slob_lock, flags);
+               sp->units = SLOB_UNITS(PAGE_SIZE);
+               sp->freelist = b;
++              sp->private = 0;
+               INIT_LIST_HEAD(&sp->lru);
+               set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
+               set_slob_page_free(sp, slob_list);
+@@ -337,7 +343,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+ /*
+  * slob_free: entry point into the slob allocator.
+  */
+-static void slob_free(void *block, int size)
++static void slob_free(struct kmem_cache *c, void *block, int size)
+ {
+       struct page *sp;
+       slob_t *prev, *next, *b = (slob_t *)block;
+@@ -349,7 +355,8 @@ static void slob_free(void *block, int size)
+               return;
+       BUG_ON(!size);
+-      sp = virt_to_page(block);
++      sp = virt_to_head_page(block);
++      BUG_ON(virt_to_page(block) != sp);
+       units = SLOB_UNITS(size);
+       spin_lock_irqsave(&slob_lock, flags);
+@@ -359,12 +366,15 @@ static void slob_free(void *block, int size)
+               if (slob_page_free(sp))
+                       clear_slob_page_free(sp);
+               spin_unlock_irqrestore(&slob_lock, flags);
+-              __ClearPageSlab(sp);
+-              page_mapcount_reset(sp);
+-              slob_free_pages(b, 0);
++              slob_free_pages(sp, 0);
+               return;
+       }
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
++              memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
++#endif
++
+       if (!slob_page_free(sp)) {
+               /* This slob page is about to become partially free. Easy! */
+               sp->units = units;
+@@ -424,11 +434,10 @@ out:
+  */
+ static __always_inline void *
+-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
++__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
+ {
+-      unsigned int *m;
+-      int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+-      void *ret;
++      slob_t *m;
++      void *ret = NULL;
+       gfp &= gfp_allowed_mask;
+@@ -442,27 +451,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
+               if (!m)
+                       return NULL;
+-              *m = size;
++              BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
++              BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
++              m[0].units = size;
++              m[1].units = align;
+               ret = (void *)m + align;
+               trace_kmalloc_node(caller, ret,
+                                  size, size + align, gfp, node);
+       } else {
+               unsigned int order = get_order(size);
++              struct page *page;
+               if (likely(order))
+                       gfp |= __GFP_COMP;
+-              ret = slob_new_pages(gfp, order, node);
++              page = slob_new_pages(gfp, order, node);
++              if (page) {
++                      ret = page_address(page);
++                      page->private = size;
++              }
+               trace_kmalloc_node(caller, ret,
+                                  size, PAGE_SIZE << order, gfp, node);
+       }
+-      kmemleak_alloc(ret, size, 1, gfp);
+       return ret;
+ }
+-void *__kmalloc(size_t size, gfp_t gfp)
++static __always_inline void *
++__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
++{
++      int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++      void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
++
++      if (!ZERO_OR_NULL_PTR(ret))
++              kmemleak_alloc(ret, size, 1, gfp);
++      return ret;
++}
++
++void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
+ {
+       return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
+ }
+@@ -491,39 +518,140 @@ void kfree(const void *block)
+               return;
+       kmemleak_free(block);
+-      sp = virt_to_page(block);
+-      if (PageSlab(sp)) {
++      VM_BUG_ON(!virt_addr_valid(block));
++      sp = virt_to_head_page(block);
++      BUG_ON(virt_to_page(block) != sp);
++      VM_BUG_ON(!PageSlab(sp));
++      if (!sp->private) {
+               int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+-              unsigned int *m = (unsigned int *)(block - align);
+-              slob_free(m, *m + align);
+-      } else
++              slob_t *m = (slob_t *)(block - align);
++
++              BUG_ON(sp->units < 0);
++              slob_free(NULL, m, m[0].units + align);
++      } else {
++              __ClearPageSlab(sp);
++              page_mapcount_reset(sp);
++              sp->private = 0;
+               __free_pages(sp, compound_order(sp));
++      }
+ }
+ EXPORT_SYMBOL(kfree);
++bool is_usercopy_object(const void *ptr)
++{
++      struct page *page;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return false;
++
++      if (!slab_is_available())
++              return false;
++
++      if (is_vmalloc_addr(ptr)
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++          && !object_starts_on_stack(ptr)
++#endif
++      ) {
++              struct vm_struct *vm = find_vm_area(ptr);
++              if (vm && (vm->flags & VM_USERCOPY))
++                      return true;
++              return false;
++      }
++
++      if (!virt_addr_valid(ptr))
++              return false;
++
++      page = virt_to_head_page(ptr);
++      BUG_ON(virt_to_page(ptr) != page);
++
++      if (!PageSlab(page))
++              return false;
++
++      // PAX: TODO check SLAB_USERCOPY
++
++      return false;
++}
++
++#ifdef CONFIG_HARDENED_USERCOPY
++const char *__check_heap_object(const void *ptr, unsigned long n,
++                              struct page *page)
++{
++      const slob_t *free;
++      const void *base;
++      unsigned long flags;
++
++      BUG_ON(virt_to_page(ptr) != page);
++
++      if (page->private) {
++              base = page_address(page);
++              if (base <= ptr && n <= page->private - (ptr - base))
++                      return NULL;
++              return "<slob 1>";
++      }
++
++      /* some tricky double walking to find the chunk */
++      spin_lock_irqsave(&slob_lock, flags);
++      base = (const void *)((unsigned long)ptr & PAGE_MASK);
++      free = page->freelist;
++
++      while (!slob_last(free) && (const void *)free <= ptr) {
++              base = free + slob_units(free);
++              free = slob_next(free);
++      }
++
++      while (base < (const void *)free) {
++              slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
++              int size = SLOB_UNIT * SLOB_UNITS(m + align);
++              int offset;
++
++              if (ptr < base + align)
++                      break;
++
++              offset = ptr - base - align;
++              if (offset >= m) {
++                      base += size;
++                      continue;
++              }
++
++              if (n > m - offset)
++                      break;
++
++              spin_unlock_irqrestore(&slob_lock, flags);
++              return NULL;
++      }
++
++      spin_unlock_irqrestore(&slob_lock, flags);
++      return "<slob 2>";
++}
++#endif
++
+ /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
+ size_t ksize(const void *block)
+ {
+       struct page *sp;
+       int align;
+-      unsigned int *m;
++      slob_t *m;
+       BUG_ON(!block);
+       if (unlikely(block == ZERO_SIZE_PTR))
+               return 0;
+-      sp = virt_to_page(block);
+-      if (unlikely(!PageSlab(sp)))
+-              return PAGE_SIZE << compound_order(sp);
++      sp = virt_to_head_page(block);
++      BUG_ON(virt_to_page(block) != sp);
++      VM_BUG_ON(!PageSlab(sp));
++      if (sp->private)
++              return sp->private;
+       align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+-      m = (unsigned int *)(block - align);
+-      return SLOB_UNITS(*m) * SLOB_UNIT;
++      m = (slob_t *)(block - align);
++      return SLOB_UNITS(m[0].units) * SLOB_UNIT;
+ }
+ EXPORT_SYMBOL(ksize);
+ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+ {
++      flags = pax_sanitize_slab_flags(flags);
++
+       if (flags & SLAB_DESTROY_BY_RCU) {
+               /* leave room for rcu footer at the end of object */
+               c->size += sizeof(struct slob_rcu);
+@@ -534,23 +662,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+ {
+-      void *b;
++      void *b = NULL;
+       flags &= gfp_allowed_mask;
+       lockdep_trace_alloc(flags);
++#ifdef CONFIG_PAX_USERCOPY
++      b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
++#else
+       if (c->size < PAGE_SIZE) {
+               b = slob_alloc(c->size, flags, c->align, node);
+               trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+                                           SLOB_UNITS(c->size) * SLOB_UNIT,
+                                           flags, node);
+       } else {
+-              b = slob_new_pages(flags, get_order(c->size), node);
++              struct page *sp;
++
++              sp = slob_new_pages(flags, get_order(c->size), node);
++              if (sp) {
++                      b = page_address(sp);
++                      sp->private = c->size;
++              }
+               trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+                                           PAGE_SIZE << get_order(c->size),
+                                           flags, node);
+       }
++#endif
+       if (b && c->ctor)
+               c->ctor(b);
+@@ -566,7 +704,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+ EXPORT_SYMBOL(kmem_cache_alloc);
+ #ifdef CONFIG_NUMA
+-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
+ {
+       return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+ }
+@@ -579,12 +717,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+ EXPORT_SYMBOL(kmem_cache_alloc_node);
+ #endif
+-static void __kmem_cache_free(void *b, int size)
++static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
+ {
+-      if (size < PAGE_SIZE)
+-              slob_free(b, size);
++      struct page *sp;
++
++      BUG_ON(virt_to_page(b) != virt_to_head_page(b));
++      sp = virt_to_head_page(b);
++      BUG_ON(!PageSlab(sp));
++      if (!sp->private)
++              slob_free(c, b, size);
+       else
+-              slob_free_pages(b, get_order(size));
++              slob_free_pages(sp, get_order(size));
+ }
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -592,22 +735,36 @@ static void kmem_rcu_free(struct rcu_head *head)
+       struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+       void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+-      __kmem_cache_free(b, slob_rcu->size);
++      __kmem_cache_free(NULL, b, slob_rcu->size);
+ }
+ void kmem_cache_free(struct kmem_cache *c, void *b)
+ {
++      int size = c->size;
++
++#ifdef CONFIG_PAX_USERCOPY
++      if (size + c->align < PAGE_SIZE) {
++              size += c->align;
++              b -= c->align;
++      }
++#endif
++
+       kmemleak_free_recursive(b, c->flags);
+       if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+               struct slob_rcu *slob_rcu;
+-              slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+-              slob_rcu->size = c->size;
++              slob_rcu = b + (size - sizeof(struct slob_rcu));
++              slob_rcu->size = size;
+               call_rcu(&slob_rcu->head, kmem_rcu_free);
+       } else {
+-              __kmem_cache_free(b, c->size);
++              __kmem_cache_free(c, b, size);
+       }
++#ifdef CONFIG_PAX_USERCOPY
++      trace_kfree(_RET_IP_, b);
++#else
+       trace_kmem_cache_free(_RET_IP_, b);
++#endif
++
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
+diff --git a/mm/slub.c b/mm/slub.c
+index 9adae58..5527bad 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -34,6 +34,7 @@
+ #include <linux/stacktrace.h>
+ #include <linux/prefetch.h>
+ #include <linux/memcontrol.h>
++#include <linux/vmalloc.h>
+ #include <trace/events/kmem.h>
+@@ -214,7 +215,7 @@ struct track {
+ enum track_item { TRACK_ALLOC, TRACK_FREE };
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_add(struct kmem_cache *);
+ static int sysfs_slab_alias(struct kmem_cache *, const char *);
+ static void memcg_propagate_slab_attrs(struct kmem_cache *s);
+@@ -240,30 +241,40 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
+  *                    Core slab cache functions
+  *******************************************************************/
++static const unsigned long global_rand __latent_entropy;
++
+ static inline void *get_freepointer(struct kmem_cache *s, void *object)
+ {
+-      return *(void **)(object + s->offset);
++      unsigned long freepointer_addr = (unsigned long)object + s->offset;
++      return (void *)(*(unsigned long *)freepointer_addr ^ global_rand ^ freepointer_addr);
+ }
+ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+ {
+-      prefetch(object + s->offset);
++      unsigned long freepointer_addr = (unsigned long)object + s->offset;
++      if (object) {
++              void **freepointer_ptr = (void **)(*(unsigned long *)freepointer_addr ^ global_rand ^ freepointer_addr);
++              prefetch(freepointer_ptr);
++      }
+ }
+ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+ {
++      unsigned long freepointer_addr;
+       void *p;
+       if (!debug_pagealloc_enabled())
+               return get_freepointer(s, object);
+-      probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+-      return p;
++      freepointer_addr = (unsigned long)object + s->offset;
++      probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
++      return (void *)((unsigned long)p ^ global_rand ^ freepointer_addr);
+ }
+ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
+ {
+-      *(void **)(object + s->offset) = fp;
++      unsigned long freepointer_addr = (unsigned long)object + s->offset;
++      *(void **)freepointer_addr = (void *)((unsigned long)fp ^ global_rand ^ freepointer_addr);
+ }
+ /* Loop over all objects in a slab */
+@@ -569,7 +580,7 @@ static void print_track(const char *s, struct track *t)
+       if (!t->addr)
+               return;
+-      pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
++      pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
+              s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+ #ifdef CONFIG_STACKTRACE
+       {
+@@ -2896,6 +2907,23 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
+       void *tail_obj = tail ? : head;
+       struct kmem_cache_cpu *c;
+       unsigned long tid;
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      if (!(s->flags & SLAB_NO_SANITIZE)) {
++              int offset = s->offset ? 0 : sizeof(void *);
++              void *x = head;
++
++              while (1) {
++                      memset(x + offset, PAX_MEMORY_SANITIZE_VALUE, s->object_size - offset);
++                      if (s->ctor)
++                              s->ctor(x);
++                      if (x == tail_obj)
++                              break;
++                      x = get_freepointer(s, x);
++              }
++      }
++#endif
++
+ redo:
+       /*
+        * Determine the currently cpus per cpu slab.
+@@ -3699,7 +3727,7 @@ static int __init setup_slub_min_objects(char *str)
+ __setup("slub_min_objects=", setup_slub_min_objects);
+-void *__kmalloc(size_t size, gfp_t flags)
++void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
+ {
+       struct kmem_cache *s;
+       void *ret;
+@@ -3737,7 +3765,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+       return ptr;
+ }
+-void *__kmalloc_node(size_t size, gfp_t flags, int node)
++void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+       struct kmem_cache *s;
+       void *ret;
+@@ -3768,9 +3796,41 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+ EXPORT_SYMBOL(__kmalloc_node);
+ #endif
++bool is_usercopy_object(const void *ptr)
++{
++      struct page *page;
++
++      if (ZERO_OR_NULL_PTR(ptr))
++              return false;
++
++      if (!slab_is_available())
++              return false;
++
++      if (is_vmalloc_addr(ptr)
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++          && !object_starts_on_stack(ptr)
++#endif
++      ) {
++              struct vm_struct *vm = find_vm_area(ptr);
++              if (vm && (vm->flags & VM_USERCOPY))
++                      return true;
++              return false;
++      }
++
++      if (!virt_addr_valid(ptr))
++              return false;
++
++      page = virt_to_head_page(ptr);
++
++      if (!PageSlab(page))
++              return false;
++
++      return !!page->slab_cache->usersize;
++}
++
+ #ifdef CONFIG_HARDENED_USERCOPY
+ /*
+- * Rejects objects that are incorrectly sized.
++ * Detect unwanted object access
+  *
+  * Returns NULL if check passes, otherwise const char * to name of cache
+  * to indicate an error.
+@@ -3780,15 +3840,15 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
+ {
+       struct kmem_cache *s;
+       unsigned long offset;
+-      size_t object_size;
+       /* Find object and usable object size. */
+       s = page->slab_cache;
+-      object_size = slab_ksize(s);
++#ifdef CONFIG_BROKEN_SECURITY
+       /* Reject impossible pointers. */
+       if (ptr < page_address(page))
+               return s->name;
++#endif
+       /* Find offset within object. */
+       offset = (ptr - page_address(page)) % s->size;
+@@ -3800,11 +3860,16 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
+               offset -= s->red_left_pad;
+       }
+-      /* Allow address range falling entirely within object size. */
+-      if (offset <= object_size && n <= object_size - offset)
+-              return NULL;
++      if (offset < s->useroffset)
++              return s->name;
+-      return s->name;
++      if (offset - s->useroffset >= s->usersize)
++              return s->name;
++
++      if (n > s->useroffset - offset + s->usersize)
++              return s->name;
++
++      return NULL;
+ }
+ #endif /* CONFIG_HARDENED_USERCOPY */
+@@ -3846,6 +3911,7 @@ void kfree(const void *x)
+       if (unlikely(ZERO_OR_NULL_PTR(x)))
+               return;
++      VM_BUG_ON(!virt_addr_valid(x));
+       page = virt_to_head_page(x);
+       if (unlikely(!PageSlab(page))) {
+               BUG_ON(!PageCompound(page));
+@@ -4116,7 +4182,7 @@ void __init kmem_cache_init(void)
+       kmem_cache = &boot_kmem_cache;
+       create_boot_cache(kmem_cache_node, "kmem_cache_node",
+-              sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
++              sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
+       register_hotmemory_notifier(&slab_memory_callback_nb);
+@@ -4126,7 +4192,7 @@ void __init kmem_cache_init(void)
+       create_boot_cache(kmem_cache, "kmem_cache",
+                       offsetof(struct kmem_cache, node) +
+                               nr_node_ids * sizeof(struct kmem_cache_node *),
+-                     SLAB_HWCACHE_ALIGN);
++                     SLAB_HWCACHE_ALIGN, 0, 0);
+       kmem_cache = bootstrap(&boot_kmem_cache);
+@@ -4166,7 +4232,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+       s = find_mergeable(size, align, flags, name, ctor);
+       if (s) {
+-              s->refcount++;
++              atomic_inc(&s->refcount);
+               /*
+                * Adjust the object sizes so that we clear
+@@ -4182,7 +4248,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+               }
+               if (sysfs_slab_alias(s, name)) {
+-                      s->refcount--;
++                      atomic_dec(&s->refcount);
+                       s = NULL;
+               }
+       }
+@@ -4194,6 +4260,8 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+ {
+       int err;
++      flags = pax_sanitize_slab_flags(flags);
++
+       err = kmem_cache_open(s, flags);
+       if (err)
+               return err;
+@@ -4299,7 +4367,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+ }
+ #endif
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int count_inuse(struct page *page)
+ {
+       return page->inuse;
+@@ -4580,7 +4648,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
+               len += sprintf(buf + len, "%7ld ", l->count);
+               if (l->addr)
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++                      len += sprintf(buf + len, "%pS", NULL);
++#else
+                       len += sprintf(buf + len, "%pS", (void *)l->addr);
++#endif
+               else
+                       len += sprintf(buf + len, "<not-available>");
+@@ -4678,12 +4750,12 @@ static void __init resiliency_test(void)
+       validate_slab_cache(kmalloc_caches[9]);
+ }
+ #else
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static void resiliency_test(void) {};
+ #endif
+ #endif
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ enum slab_stat_type {
+       SL_ALL,                 /* All slabs */
+       SL_PARTIAL,             /* Only partially allocated slabs */
+@@ -4920,13 +4992,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
+ {
+       if (!s->ctor)
+               return 0;
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++      return sprintf(buf, "%pS\n", NULL);
++#else
+       return sprintf(buf, "%pS\n", s->ctor);
++#endif
+ }
+ SLAB_ATTR_RO(ctor);
+ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
+ {
+-      return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
++      return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
+ }
+ SLAB_ATTR_RO(aliases);
+@@ -5014,6 +5090,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+ SLAB_ATTR_RO(cache_dma);
+ #endif
++#ifdef CONFIG_PAX_USERCOPY
++static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
++{
++      return sprintf(buf, "%d\n", !!s->usersize);
++}
++SLAB_ATTR_RO(usercopy);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
++{
++      return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
++}
++SLAB_ATTR_RO(sanitize);
++#endif
++
+ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
+ {
+       return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+@@ -5069,7 +5161,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
+        * as well as cause other issues like converting a mergeable
+        * cache into an umergeable one.
+        */
+-      if (s->refcount > 1)
++      if (atomic_read(&s->refcount) > 1)
+               return -EINVAL;
+       s->flags &= ~SLAB_TRACE;
+@@ -5187,7 +5279,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+ static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+                                                       size_t length)
+ {
+-      if (s->refcount > 1)
++      if (atomic_read(&s->refcount) > 1)
+               return -EINVAL;
+       s->flags &= ~SLAB_FAILSLAB;
+@@ -5319,7 +5411,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+ STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
+ #endif
+-static struct attribute *slab_attrs[] = {
++static struct attribute *slab_attrs[] __read_only = {
+       &slab_size_attr.attr,
+       &object_size_attr.attr,
+       &objs_per_slab_attr.attr,
+@@ -5354,6 +5446,12 @@ static struct attribute *slab_attrs[] = {
+ #ifdef CONFIG_ZONE_DMA
+       &cache_dma_attr.attr,
+ #endif
++#ifdef CONFIG_PAX_USERCOPY
++      &usercopy_attr.attr,
++#endif
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++      &sanitize_attr.attr,
++#endif
+ #ifdef CONFIG_NUMA
+       &remote_node_defrag_ratio_attr.attr,
+ #endif
+@@ -5597,6 +5695,7 @@ static char *create_unique_id(struct kmem_cache *s)
+       return name;
+ }
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_add(struct kmem_cache *s)
+ {
+       int err;
+@@ -5668,6 +5767,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
+       kobject_del(&s->kobj);
+       kobject_put(&s->kobj);
+ }
++#endif
+ /*
+  * Need to buffer aliases during bootup until sysfs becomes
+@@ -5681,6 +5781,7 @@ struct saved_alias {
+ static struct saved_alias *alias_list;
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+ {
+       struct saved_alias *al;
+@@ -5703,6 +5804,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+       alias_list = al;
+       return 0;
+ }
++#endif
+ static int __init slab_sysfs_init(void)
+ {
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index 574c67b..e890af8 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -203,7 +203,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              pud_populate(&init_mm, pud, p);
++              pud_populate_kernel(&init_mm, pud, p);
+       }
+       return pud;
+ }
+@@ -215,7 +215,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              pgd_populate(&init_mm, pgd, p);
++              pgd_populate_kernel(&init_mm, pgd, p);
+       }
+       return pgd;
+ }
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 1e168bf..2dc7328 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -749,7 +749,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+       for (i = 0; i < nr_pages; i++) {
+               if (PageHWPoison(&memmap[i])) {
+-                      atomic_long_sub(1, &num_poisoned_pages);
++                      atomic_long_sub_unchecked(1, &num_poisoned_pages);
+                       ClearPageHWPoison(&memmap[i]);
+               }
+       }
+diff --git a/mm/swap.c b/mm/swap.c
+index 75c63bb..a4dce20 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -91,6 +91,13 @@ static void __put_compound_page(struct page *page)
+       if (!PageHuge(page))
+               __page_cache_release(page);
+       dtor = get_compound_page_dtor(page);
++      if (!PageHuge(page))
++              BUG_ON(dtor != free_compound_page
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++                      && dtor != free_transhuge_page
++#endif
++              );
++
+       (*dtor)(page);
+ }
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 2657acc..7eedf77 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -90,7 +90,7 @@ static DEFINE_MUTEX(swapon_mutex);
+ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+ /* Activity counter to indicate that a swapon or swapoff has occurred */
+-static atomic_t proc_poll_event = ATOMIC_INIT(0);
++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
+ static inline unsigned char swap_count(unsigned char ent)
+ {
+@@ -1979,7 +1979,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+       spin_unlock(&swap_lock);
+       err = 0;
+-      atomic_inc(&proc_poll_event);
++      atomic_inc_unchecked(&proc_poll_event);
+       wake_up_interruptible(&proc_poll_wait);
+ out_dput:
+@@ -1996,8 +1996,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
+       poll_wait(file, &proc_poll_wait, wait);
+-      if (seq->poll_event != atomic_read(&proc_poll_event)) {
+-              seq->poll_event = atomic_read(&proc_poll_event);
++      if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
++              seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+               return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+       }
+@@ -2095,7 +2095,7 @@ static int swaps_open(struct inode *inode, struct file *file)
+               return ret;
+       seq = file->private_data;
+-      seq->poll_event = atomic_read(&proc_poll_event);
++      seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+       return 0;
+ }
+@@ -2543,7 +2543,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+               (frontswap_map) ? "FS" : "");
+       mutex_unlock(&swapon_mutex);
+-      atomic_inc(&proc_poll_event);
++      atomic_inc_unchecked(&proc_poll_event);
+       wake_up_interruptible(&proc_poll_wait);
+       if (S_ISREG(inode->i_mode))
+diff --git a/mm/usercopy.c b/mm/usercopy.c
+index 3c8da0a..3e4bdaf 100644
+--- a/mm/usercopy.c
++++ b/mm/usercopy.c
+@@ -16,15 +16,9 @@
+ #include <linux/mm.h>
+ #include <linux/slab.h>
++#include <linux/ratelimit.h>
+ #include <asm/sections.h>
+-enum {
+-      BAD_STACK = -1,
+-      NOT_STACK = 0,
+-      GOOD_FRAME,
+-      GOOD_STACK,
+-};
+-
+ /*
+  * Checks if a given pointer and length is contained by the current
+  * stack frame (if possible).
+@@ -35,11 +29,13 @@ enum {
+  *    GOOD_STACK: fully on the stack (when can't do frame-checking)
+  *    BAD_STACK: error condition (invalid stack position or bad stack frame)
+  */
+-static noinline int check_stack_object(const void *obj, unsigned long len)
++static noinline int check_stack_object(unsigned long obj, unsigned long len)
+ {
+-      const void * const stack = task_stack_page(current);
+-      const void * const stackend = stack + THREAD_SIZE;
+-      int ret;
++      unsigned long stack = (unsigned long)task_stack_page(current);
++      unsigned long stackend = (unsigned long)stack + THREAD_SIZE;
++
++      if (obj + len < obj)
++              return BAD_STACK;
+       /* Object is not on the stack at all. */
+       if (obj + len <= stack || stackend <= obj)
+@@ -54,25 +50,29 @@ static noinline int check_stack_object(const void *obj, unsigned long len)
+               return BAD_STACK;
+       /* Check if object is safely within a valid frame. */
+-      ret = arch_within_stack_frames(stack, stackend, obj, len);
+-      if (ret)
+-              return ret;
+-
+-      return GOOD_STACK;
++      return arch_within_stack_frames(stack, stackend, obj, len);
+ }
+-static void report_usercopy(const void *ptr, unsigned long len,
+-                          bool to_user, const char *type)
++static DEFINE_RATELIMIT_STATE(usercopy_ratelimit, 15 * HZ, 3);
++
++static __noreturn void report_usercopy(const void *ptr, unsigned long len,
++                                     bool to_user, const char *type)
+ {
+-      pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+-              to_user ? "exposure" : "overwrite",
+-              to_user ? "from" : "to", ptr, type ? : "unknown", len);
++      if (__ratelimit(&usercopy_ratelimit)) {
++              pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++                      to_user ? "exposure" : "overwrite",
++                      to_user ? "from" : "to", ptr, type ? : "unknown", len);
++              dump_stack();
++      }
++      do_group_exit(SIGKILL);
++#ifdef CONFIG_BROKEN_SECURITY
+       /*
+        * For greater effect, it would be nice to do do_group_exit(),
+        * but BUG() actually hooks all the lock-breaking and per-arch
+        * Oops code, so that is used here instead.
+        */
+       BUG();
++#endif
+ }
+ /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */
+@@ -252,10 +252,15 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+               goto report;
+       /* Check for bad stack object. */
+-      switch (check_stack_object(ptr, n)) {
++      switch (check_stack_object((unsigned long)ptr, n)) {
+       case NOT_STACK:
+               /* Object is not touching the current process stack. */
+-              break;
++              /* Check for object in kernel to avoid text exposure. */
++              err = check_kernel_text_object(ptr, n);
++              if (err)
++                      break;
++              return;
++
+       case GOOD_FRAME:
+       case GOOD_STACK:
+               /*
+@@ -264,16 +269,12 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+                * process stack (when frame checking not available).
+                */
+               return;
+-      default:
++
++      case BAD_STACK:
+               err = "<process stack>";
+-              goto report;
++              break;
+       }
+-      /* Check for object in kernel to avoid text exposure. */
+-      err = check_kernel_text_object(ptr, n);
+-      if (!err)
+-              return;
+-
+ report:
+       report_usercopy(ptr, n, to_user, err);
+ }
+diff --git a/mm/util.c b/mm/util.c
+index 662cddf..ad8d778 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -239,6 +239,12 @@ int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+       mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++      if (mm->pax_flags & MF_PAX_RANDMMAP)
++              mm->mmap_base += mm->delta_mmap;
++#endif
++
+       mm->get_unmapped_area = arch_get_unmapped_area;
+ }
+ #endif
+@@ -432,6 +438,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
+ unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
+ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ int overcommit_ratio_handler(struct ctl_table *table, int write,
+                            void __user *buffer, size_t *lenp,
+@@ -611,6 +618,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
+       if (!mm->arg_end)
+               goto out_mm;    /* Shh! No looking before we're done */
++      if (gr_acl_handle_procpidmem(task))
++              goto out_mm;
++
+       down_read(&mm->mmap_sem);
+       arg_start = mm->arg_start;
+       arg_end = mm->arg_end;
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 91f44e7..8500d40 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -43,20 +43,65 @@ struct vfree_deferred {
+       struct work_struct wq;
+ };
+ static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
++static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++struct stack_deferred_llist {
++      struct llist_head list;
++      void *stack;
++      void *lowmem_stack;
++};
++
++struct stack_deferred {
++      struct stack_deferred_llist list;
++      struct work_struct wq;
++};
++
++static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
++#endif
+ static void __vunmap(const void *, int);
+-static void free_work(struct work_struct *w)
++static void vfree_work(struct work_struct *w)
+ {
+       struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+       struct llist_node *llnode = llist_del_all(&p->list);
+       while (llnode) {
+-              void *p = llnode;
++              void *x = llnode;
+               llnode = llist_next(llnode);
+-              __vunmap(p, 1);
++              __vunmap(x, 1);
+       }
+ }
++static void vunmap_work(struct work_struct *w)
++{
++      struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
++      struct llist_node *llnode = llist_del_all(&p->list);
++      while (llnode) {
++              void *x = llnode;
++              llnode = llist_next(llnode);
++              __vunmap(x, 0);
++      }
++}
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static void unmap_work(struct work_struct *w)
++{
++      struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
++      struct llist_node *llnode = llist_del_all(&p->list.list);
++      while (llnode) {
++              struct stack_deferred_llist *x =
++                      llist_entry((struct llist_head *)llnode,
++                                   struct stack_deferred_llist, list);
++              void *stack = ACCESS_ONCE(x->stack);
++              void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
++              llnode = llist_next(llnode);
++              __vunmap(stack, 0);
++              free_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
++      }
++}
++#endif
++
+ /*** Page table manipulation functions ***/
+ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -64,10 +109,23 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+       pte_t *pte;
+       pte = pte_offset_kernel(pmd, addr);
++      pax_open_kernel();
+       do {
+-              pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+-              WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
++                      BUG_ON(!pte_exec(*pte));
++                      set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
++                      continue;
++              }
++#endif
++
++              {
++                      pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
++                      WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++              }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
++      pax_close_kernel();
+ }
+ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+@@ -130,16 +188,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+       pte = pte_alloc_kernel(pmd, addr);
+       if (!pte)
+               return -ENOMEM;
++
++      pax_open_kernel();
+       do {
+               struct page *page = pages[*nr];
+-              if (WARN_ON(!pte_none(*pte)))
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++              if (pgprot_val(prot) & _PAGE_NX)
++#endif
++
++              if (!pte_none(*pte)) {
++                      pax_close_kernel();
++                      WARN_ON(1);
+                       return -EBUSY;
+-              if (WARN_ON(!page))
++              }
++              if (!page) {
++                      pax_close_kernel();
++                      WARN_ON(1);
+                       return -ENOMEM;
++              }
+               set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+               (*nr)++;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
++      pax_close_kernel();
+       return 0;
+ }
+@@ -149,7 +220,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+       pmd_t *pmd;
+       unsigned long next;
+-      pmd = pmd_alloc(&init_mm, pud, addr);
++      pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+       do {
+@@ -166,7 +237,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+       pud_t *pud;
+       unsigned long next;
+-      pud = pud_alloc(&init_mm, pgd, addr);
++      pud = pud_alloc_kernel(&init_mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+@@ -226,6 +297,12 @@ int is_vmalloc_or_module_addr(const void *x)
+       if (addr >= MODULES_VADDR && addr < MODULES_END)
+               return 1;
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++      if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
++              return 1;
++#endif
++
+       return is_vmalloc_addr(x);
+ }
+@@ -246,8 +323,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+       if (!pgd_none(*pgd)) {
+               pud_t *pud = pud_offset(pgd, addr);
++#ifdef CONFIG_X86
++              if (!pud_large(*pud))
++#endif
+               if (!pud_none(*pud)) {
+                       pmd_t *pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_X86
++                      if (!pmd_large(*pmd))
++#endif
+                       if (!pmd_none(*pmd)) {
+                               pte_t *ptep, pte;
+@@ -350,7 +433,7 @@ static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
+  * Allocate a region of KVA of the specified size and alignment, within the
+  * vstart and vend.
+  */
+-static struct vmap_area *alloc_vmap_area(unsigned long size,
++static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
+                               unsigned long align,
+                               unsigned long vstart, unsigned long vend,
+                               int node, gfp_t gfp_mask)
+@@ -1228,13 +1311,27 @@ void __init vmalloc_init(void)
+       for_each_possible_cpu(i) {
+               struct vmap_block_queue *vbq;
+               struct vfree_deferred *p;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++              struct stack_deferred *p2;
++#endif
+               vbq = &per_cpu(vmap_block_queue, i);
+               spin_lock_init(&vbq->lock);
+               INIT_LIST_HEAD(&vbq->free);
++
+               p = &per_cpu(vfree_deferred, i);
+               init_llist_head(&p->list);
+-              INIT_WORK(&p->wq, free_work);
++              INIT_WORK(&p->wq, vfree_work);
++
++              p = &per_cpu(vunmap_deferred, i);
++              init_llist_head(&p->list);
++              INIT_WORK(&p->wq, vunmap_work);
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++              p2 = &per_cpu(stack_deferred, i);
++              init_llist_head(&p2->list.list);
++              INIT_WORK(&p2->wq, unmap_work);
++#endif
+       }
+       /* Import existing vmlist entries. */
+@@ -1359,6 +1456,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+       struct vm_struct *area;
+       BUG_ON(in_interrupt());
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++      if (flags & VM_KERNEXEC) {
++              if (start != VMALLOC_START || end != VMALLOC_END)
++                      return NULL;
++              start = (unsigned long)MODULES_EXEC_VADDR;
++              end = (unsigned long)MODULES_EXEC_END;
++      }
++#endif
++
+       if (flags & VM_IOREMAP)
+               align = 1ul << clamp_t(int, fls_long(size),
+                                      PAGE_SHIFT, IOREMAP_MAX_ORDER);
+@@ -1371,7 +1478,11 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+       if (unlikely(!area))
+               return NULL;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++      if (!(flags & VM_NO_GUARD) || (start >= VMALLOC_START && end <= VMALLOC_END))
++#else
+       if (!(flags & VM_NO_GUARD))
++#endif
+               size += PAGE_SIZE;
+       va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
+@@ -1553,13 +1664,36 @@ EXPORT_SYMBOL(vfree);
+  */
+ void vunmap(const void *addr)
+ {
+-      BUG_ON(in_interrupt());
+-      might_sleep();
+-      if (addr)
++      if (!addr)
++              return;
++      if (unlikely(in_interrupt())) {
++              struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
++              if (pax_llist_add((struct llist_node *)addr, &p->list))
++                      schedule_work(&p->wq);
++      } else {
++              might_sleep();
+               __vunmap(addr, 0);
++      }
+ }
+ EXPORT_SYMBOL(vunmap);
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++void unmap_process_stacks(struct task_struct *task)
++{
++      if (unlikely(in_interrupt())) {
++              struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
++              struct stack_deferred_llist *list = task->stack;
++              list->stack = task->stack;
++              list->lowmem_stack = task->lowmem_stack;
++              if (llist_add((struct llist_node *)&list->list, &p->list.list))
++                      schedule_work(&p->wq);
++      } else {
++              __vunmap(task->stack, 0);
++              free_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
++      }
++}
++#endif
++
+ /**
+  *    vmap  -  map an array of pages into virtually contiguous space
+  *    @pages:         array of page pointers
+@@ -1581,6 +1715,11 @@ void *vmap(struct page **pages, unsigned int count,
+       if (count > totalram_pages)
+               return NULL;
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++      if (!(pgprot_val(prot) & _PAGE_NX))
++              flags |= VM_KERNEXEC;
++#endif
++
+       size = (unsigned long)count << PAGE_SHIFT;
+       area = get_vm_area_caller(size, flags, __builtin_return_address(0));
+       if (!area)
+@@ -1684,6 +1823,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+       if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+               goto fail;
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++      if (!(pgprot_val(prot) & _PAGE_NX)) {
++              vm_flags |= VM_KERNEXEC;
++              start = VMALLOC_START;
++              end = VMALLOC_END;
++      }
++#endif
++
+       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+                               vm_flags, start, end, node, gfp_mask, caller);
+       if (!area)
+@@ -1737,6 +1884,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
+                               gfp_mask, prot, 0, node, caller);
+ }
++void *vmalloc_usercopy(unsigned long size)
++{
++      return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
++                                  GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
++                                  VM_USERCOPY, NUMA_NO_NODE,
++                                  __builtin_return_address(0));
++}
++
+ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+ {
+       return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
+@@ -1751,6 +1906,16 @@ static inline void *__vmalloc_node_flags(unsigned long size,
+                                       node, __builtin_return_address(0));
+ }
++#if defined(CONFIG_GRKERNSEC_KSTACKOVERFLOW) && defined(CONFIG_X86_64)
++void *vzalloc_irq_stack(void)
++{
++      return __vmalloc_node(IRQ_STACK_SIZE, IRQ_STACK_SIZE,
++                            GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO,
++                            PAGE_KERNEL, NUMA_NO_NODE,
++                            __builtin_return_address(0));
++}
++#endif
++
+ /**
+  *    vmalloc  -  allocate virtually contiguous memory
+  *    @size:          allocation size
+@@ -1860,10 +2025,9 @@ EXPORT_SYMBOL(vzalloc_node);
+  *    For tight control over page level allocator and protection flags
+  *    use __vmalloc() instead.
+  */
+-
+ void *vmalloc_exec(unsigned long size)
+ {
+-      return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++      return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
+                             NUMA_NO_NODE, __builtin_return_address(0));
+ }
+@@ -2170,6 +2334,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+ {
+       struct vm_struct *area;
++      BUG_ON(vma->vm_mirror);
++
+       size = PAGE_ALIGN(size);
+       if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
+@@ -2539,7 +2705,7 @@ found:
+       /* insert all vm's */
+       for (area = 0; area < nr_vms; area++)
+               setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+-                               pcpu_get_vm_areas);
++                               __builtin_return_address(0));
+       kfree(vas);
+       return vms;
+@@ -2652,7 +2818,11 @@ static int s_show(struct seq_file *m, void *p)
+               v->addr, v->addr + v->size, v->size);
+       if (v->caller)
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              seq_printf(m, " %pK", v->caller);
++#else
+               seq_printf(m, " %pS", v->caller);
++#endif
+       if (v->nr_pages)
+               seq_printf(m, " pages=%d", v->nr_pages);
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 89cec42..673413a 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -27,6 +27,7 @@
+ #include <linux/mm_inline.h>
+ #include <linux/page_ext.h>
+ #include <linux/page_owner.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
+@@ -86,8 +87,8 @@ void vm_events_fold_cpu(int cpu)
+  *
+  * vm_stat contains the global counters
+  */
+-atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+-atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
++atomic_long_unchecked_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
++atomic_long_unchecked_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
+ EXPORT_SYMBOL(vm_zone_stat);
+ EXPORT_SYMBOL(vm_node_stat);
+@@ -611,13 +612,13 @@ static int fold_diff(int *zone_diff, int *node_diff)
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+               if (zone_diff[i]) {
+-                      atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
++                      atomic_long_add_unchecked(zone_diff[i], &vm_zone_stat[i]);
+                       changes++;
+       }
+       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+               if (node_diff[i]) {
+-                      atomic_long_add(node_diff[i], &vm_node_stat[i]);
++                      atomic_long_add_unchecked(node_diff[i], &vm_node_stat[i]);
+                       changes++;
+       }
+       return changes;
+@@ -657,7 +658,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
+                       v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+                       if (v) {
+-                              atomic_long_add(v, &zone->vm_stat[i]);
++                              atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+                               global_zone_diff[i] += v;
+ #ifdef CONFIG_NUMA
+                               /* 3 seconds idle till flush */
+@@ -706,7 +707,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
+                       v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
+                       if (v) {
+-                              atomic_long_add(v, &pgdat->vm_stat[i]);
++                              atomic_long_add_unchecked(v, &pgdat->vm_stat[i]);
+                               global_node_diff[i] += v;
+                       }
+               }
+@@ -740,7 +741,7 @@ void cpu_vm_stats_fold(int cpu)
+                               v = p->vm_stat_diff[i];
+                               p->vm_stat_diff[i] = 0;
+-                              atomic_long_add(v, &zone->vm_stat[i]);
++                              atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+                               global_zone_diff[i] += v;
+                       }
+       }
+@@ -756,7 +757,7 @@ void cpu_vm_stats_fold(int cpu)
+                               v = p->vm_node_stat_diff[i];
+                               p->vm_node_stat_diff[i] = 0;
+-                              atomic_long_add(v, &pgdat->vm_stat[i]);
++                              atomic_long_add_unchecked(v, &pgdat->vm_stat[i]);
+                               global_node_diff[i] += v;
+                       }
+       }
+@@ -776,8 +777,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
+               if (pset->vm_stat_diff[i]) {
+                       int v = pset->vm_stat_diff[i];
+                       pset->vm_stat_diff[i] = 0;
+-                      atomic_long_add(v, &zone->vm_stat[i]);
+-                      atomic_long_add(v, &vm_zone_stat[i]);
++                      atomic_long_add_unchecked(v, &zone->vm_stat[i]);
++                      atomic_long_add_unchecked(v, &vm_zone_stat[i]);
+               }
+ }
+ #endif
+@@ -807,7 +808,7 @@ unsigned long sum_zone_node_page_state(int node,
+ unsigned long node_page_state(struct pglist_data *pgdat,
+                               enum node_stat_item item)
+ {
+-      long x = atomic_long_read(&pgdat->vm_stat[item]);
++      long x = atomic_long_read_unchecked(&pgdat->vm_stat[item]);
+ #ifdef CONFIG_SMP
+       if (x < 0)
+               x = 0;
+@@ -1556,10 +1557,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
+       stat_items_size += sizeof(struct vm_event_state);
+ #endif
+-      v = kmalloc(stat_items_size, GFP_KERNEL);
++      v = kzalloc(stat_items_size, GFP_KERNEL);
+       m->private = v;
+       if (!v)
+               return ERR_PTR(-ENOMEM);
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++        if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++                && !in_group_p(grsec_proc_gid)
++#endif
++        )
++              return (unsigned long *)m->private + *pos;
++#endif
++#endif
++
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+               v[i] = global_page_state(i);
+       v += NR_VM_ZONE_STAT_ITEMS;
+@@ -1656,7 +1669,7 @@ int vmstat_refresh(struct ctl_table *table, int write,
+       if (err)
+               return err;
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+-              val = atomic_long_read(&vm_zone_stat[i]);
++              val = atomic_long_read_unchecked(&vm_zone_stat[i]);
+               if (val < 0) {
+                       switch (i) {
+                       case NR_PAGES_SCANNED:
+@@ -1856,10 +1869,16 @@ static int __init setup_vmstat(void)
+       cpu_notifier_register_done();
+ #endif
+ #ifdef CONFIG_PROC_FS
+-      proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+-      proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+-      proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+-      proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
++      {
++              mode_t gr_mode = S_IRUGO;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++              gr_mode = S_IRUSR;
++#endif
++              proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
++              proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
++              proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
++              proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
++      }
+ #endif
+       return 0;
+ }
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 8de138d..df7e387 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -496,7 +496,7 @@ out:
+       return NOTIFY_DONE;
+ }
+-static struct notifier_block vlan_notifier_block __read_mostly = {
++static struct notifier_block vlan_notifier_block = {
+       .notifier_call = vlan_device_event,
+ };
+@@ -571,8 +571,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
+               err = -EPERM;
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       break;
+-              if ((args.u.name_type >= 0) &&
+-                  (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
++              if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
+                       struct vlan_net *vn;
+                       vn = net_generic(net, vlan_net_id);
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 1270207..d165bb5 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -248,7 +248,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
+       return dev_net(real_dev);
+ }
+-struct rtnl_link_ops vlan_link_ops __read_mostly = {
++struct rtnl_link_ops vlan_link_ops = {
+       .kind           = "vlan",
+       .maxtype        = IFLA_VLAN_MAX,
+       .policy         = vlan_policy,
+diff --git a/net/9p/mod.c b/net/9p/mod.c
+index 6ab36ae..6f1841b 100644
+--- a/net/9p/mod.c
++++ b/net/9p/mod.c
+@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
+ void v9fs_register_trans(struct p9_trans_module *m)
+ {
+       spin_lock(&v9fs_trans_lock);
+-      list_add_tail(&m->list, &v9fs_trans_list);
++      pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
+       spin_unlock(&v9fs_trans_lock);
+ }
+ EXPORT_SYMBOL(v9fs_register_trans);
+@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
+ void v9fs_unregister_trans(struct p9_trans_module *m)
+ {
+       spin_lock(&v9fs_trans_lock);
+-      list_del_init(&m->list);
++      pax_list_del_init((struct list_head *)&m->list);
+       spin_unlock(&v9fs_trans_lock);
+ }
+ EXPORT_SYMBOL(v9fs_unregister_trans);
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 7bc2208..79c8068 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
+       oldfs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+-      ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
++      ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
+       set_fs(oldfs);
+       if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
+index af46bc4..f9adfcd 100644
+--- a/net/appletalk/atalk_proc.c
++++ b/net/appletalk/atalk_proc.c
+@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
+       struct proc_dir_entry *p;
+       int rc = -ENOMEM;
+-      atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
++      atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
+       if (!atalk_proc_dir)
+               goto out;
+diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
+index 876fbe8..8bbea9f 100644
+--- a/net/atm/atm_misc.c
++++ b/net/atm/atm_misc.c
+@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
+       if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
+               return 1;
+       atm_return(vcc, truesize);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return 0;
+ }
+ EXPORT_SYMBOL(atm_charge);
+@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
+               }
+       }
+       atm_return(vcc, guess);
+-      atomic_inc(&vcc->stats->rx_drop);
++      atomic_inc_unchecked(&vcc->stats->rx_drop);
+       return NULL;
+ }
+ EXPORT_SYMBOL(atm_alloc_charge);
+@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
+ void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+       __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
+ void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
+       __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index e574a7e..2f5a14d 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
+ }
+ static struct lane2_ops lane2_ops = {
+-      lane2_resolve,          /* resolve,             spec 3.1.3 */
+-      lane2_associate_req,    /* associate_req,       spec 3.1.4 */
+-      NULL                    /* associate indicator, spec 3.1.5 */
++      .resolve = lane2_resolve,
++      .associate_req = lane2_associate_req,
++      .associate_indicator = NULL
+ };
+ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+diff --git a/net/atm/lec.h b/net/atm/lec.h
+index 4149db1..f2ab682 100644
+--- a/net/atm/lec.h
++++ b/net/atm/lec.h
+@@ -48,7 +48,7 @@ struct lane2_ops {
+                             const u8 *tlvs, u32 sizeoftlvs);
+       void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
+                                    const u8 *tlvs, u32 sizeoftlvs);
+-};
++} __no_const;
+ /*
+  * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
+diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
+index 9e60e74..a89fdeb 100644
+--- a/net/atm/mpoa_caches.c
++++ b/net/atm/mpoa_caches.c
+@@ -535,33 +535,32 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
+ static const struct in_cache_ops ingress_ops = {
+-      in_cache_add_entry,               /* add_entry       */
+-      in_cache_get,                     /* get             */
+-      in_cache_get_with_mask,           /* get_with_mask   */
+-      in_cache_get_by_vcc,              /* get_by_vcc      */
+-      in_cache_put,                     /* put             */
+-      in_cache_remove_entry,            /* remove_entry    */
+-      cache_hit,                        /* cache_hit       */
+-      clear_count_and_expired,          /* clear_count     */
+-      check_resolving_entries,          /* check_resolving */
+-      refresh_entries,                  /* refresh         */
+-      in_destroy_cache                  /* destroy_cache   */
++      .add_entry = in_cache_add_entry,
++      .get = in_cache_get,
++      .get_with_mask = in_cache_get_with_mask,
++      .get_by_vcc = in_cache_get_by_vcc,
++      .put = in_cache_put,
++      .remove_entry = in_cache_remove_entry,
++      .cache_hit = cache_hit,
++      .clear_count = clear_count_and_expired,
++      .check_resolving = check_resolving_entries,
++      .refresh = refresh_entries,
++      .destroy_cache = in_destroy_cache
+ };
+ static const struct eg_cache_ops egress_ops = {
+-      eg_cache_add_entry,               /* add_entry        */
+-      eg_cache_get_by_cache_id,         /* get_by_cache_id  */
+-      eg_cache_get_by_tag,              /* get_by_tag       */
+-      eg_cache_get_by_vcc,              /* get_by_vcc       */
+-      eg_cache_get_by_src_ip,           /* get_by_src_ip    */
+-      eg_cache_put,                     /* put              */
+-      eg_cache_remove_entry,            /* remove_entry     */
+-      update_eg_cache_entry,            /* update           */
+-      clear_expired,                    /* clear_expired    */
+-      eg_destroy_cache                  /* destroy_cache    */
++      .add_entry = eg_cache_add_entry,
++      .get_by_cache_id = eg_cache_get_by_cache_id,
++      .get_by_tag = eg_cache_get_by_tag,
++      .get_by_vcc = eg_cache_get_by_vcc,
++      .get_by_src_ip = eg_cache_get_by_src_ip,
++      .put = eg_cache_put,
++      .remove_entry = eg_cache_remove_entry,
++      .update = update_eg_cache_entry,
++      .clear_expired = clear_expired,
++      .destroy_cache = eg_destroy_cache
+ };
+-
+ void atm_mpoa_init_cache(struct mpoa_client *mpc)
+ {
+       mpc->in_ops = &ingress_ops;
+diff --git a/net/atm/proc.c b/net/atm/proc.c
+index bbb6461..cf04016 100644
+--- a/net/atm/proc.c
++++ b/net/atm/proc.c
+@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
+   const struct k_atm_aal_stats *stats)
+ {
+       seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
+-                 atomic_read(&stats->tx), atomic_read(&stats->tx_err),
+-                 atomic_read(&stats->rx), atomic_read(&stats->rx_err),
+-                 atomic_read(&stats->rx_drop));
++                 atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
++                 atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
++                 atomic_read_unchecked(&stats->rx_drop));
+ }
+ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index 0447d5d..3cf4728 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
+ static void copy_aal_stats(struct k_atm_aal_stats *from,
+     struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+       __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ static void subtract_aal_stats(struct k_atm_aal_stats *from,
+     struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+       __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
+index 919a5ce..cc6b444 100644
+--- a/net/ax25/sysctl_net_ax25.c
++++ b/net/ax25/sysctl_net_ax25.c
+@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
+ {
+       char path[sizeof("net/ax25/") + IFNAMSIZ];
+       int k;
+-      struct ctl_table *table;
++      ctl_table_no_const *table;
+       table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
+       if (!table)
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 19b0abd..9a487ee 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -361,7 +361,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+       /* randomize initial seqno to avoid collision */
+       get_random_bytes(&random_seqno, sizeof(random_seqno));
+-      atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
++      atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
+       hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+       ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+@@ -973,9 +973,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+       batadv_ogm_packet->tvlv_len = htons(tvlv_len);
+       /* change sequence number to network order */
+-      seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
++      seqno = (u32)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
+       batadv_ogm_packet->seqno = htonl(seqno);
+-      atomic_inc(&hard_iface->bat_iv.ogm_seqno);
++      atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
+       batadv_iv_ogm_slide_own_bcast_window(hard_iface);
+@@ -1673,7 +1673,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
+               return;
+       /* could be changed by schedule_own_packet() */
+-      if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
++      if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
+       if (ogm_packet->flags & BATADV_DIRECTLINK)
+               has_directlink_flag = true;
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index 0934730..a8189fc 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -469,7 +469,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+       frag_header.packet_type = BATADV_UNICAST_FRAG;
+       frag_header.version = BATADV_COMPAT_VERSION;
+       frag_header.ttl = BATADV_TTL;
+-      frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
++      frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
+       frag_header.reserved = 0;
+       frag_header.no = 0;
+       frag_header.total_size = htons(skb->len);
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index 3d19947..5c61638 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -758,7 +758,7 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+               if (!primary_if)
+                       goto out;
+               orig_addr = primary_if->net_dev->dev_addr;
+-              orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++              orig_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+       } else {
+               orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+                                                    vid);
+@@ -834,7 +834,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+        * value is used later to check if the node which sent (or re-routed
+        * last time) the packet had an updated information or not
+        */
+-      curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++      curr_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+       if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+               orig_node = batadv_orig_hash_find(bat_priv,
+                                                 unicast_packet->dest);
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 7527c06..42024c6 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -180,7 +180,7 @@ static void batadv_interface_set_rx_mode(struct net_device *dev)
+ {
+ }
+-static int batadv_interface_tx(struct sk_buff *skb,
++static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
+                              struct net_device *soft_iface)
+ {
+       struct ethhdr *ethhdr;
+@@ -332,7 +332,7 @@ send:
+                               primary_if->net_dev->dev_addr);
+               /* set broadcast sequence number */
+-              seqno = atomic_inc_return(&bat_priv->bcast_seqno);
++              seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
+               bcast_packet->seqno = htonl(seqno);
+               batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
+@@ -835,8 +835,8 @@ static int batadv_softif_init_late(struct net_device *dev)
+       atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
+       atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
+-      atomic_set(&bat_priv->bcast_seqno, 1);
+-      atomic_set(&bat_priv->tt.vn, 0);
++      atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
++      atomic_set_unchecked(&bat_priv->tt.vn, 0);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+ #ifdef CONFIG_BATMAN_ADV_BLA
+@@ -851,7 +851,7 @@ static int batadv_softif_init_late(struct net_device *dev)
+       /* randomize initial seqno to avoid collision */
+       get_random_bytes(&random_seqno, sizeof(random_seqno));
+-      atomic_set(&bat_priv->frag_seqno, random_seqno);
++      atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
+       bat_priv->primary_if = NULL;
+       bat_priv->num_ifaces = 0;
+@@ -1069,7 +1069,7 @@ bool batadv_softif_is_valid(const struct net_device *net_dev)
+       return false;
+ }
+-struct rtnl_link_ops batadv_link_ops __read_mostly = {
++struct rtnl_link_ops batadv_link_ops = {
+       .kind           = "batadv",
+       .priv_size      = sizeof(struct batadv_priv),
+       .setup          = batadv_softif_init_early,
+diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
+index fe9ca94..ae07bdc 100644
+--- a/net/batman-adv/sysfs.c
++++ b/net/batman-adv/sysfs.c
+@@ -146,7 +146,7 @@ struct batadv_attribute batadv_attr_##_name = {            \
+ #define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func)                 \
+ ssize_t batadv_store_##_name(struct kobject *kobj,                    \
+-                           struct attribute *attr, char *buff,        \
++                           struct kobj_attribute *attr, char *buff,   \
+                            size_t count)                              \
+ {                                                                     \
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
+@@ -158,7 +158,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                 \
+ #define BATADV_ATTR_SIF_SHOW_BOOL(_name)                              \
+ ssize_t batadv_show_##_name(struct kobject *kobj,                     \
+-                          struct attribute *attr, char *buff)         \
++                          struct kobj_attribute *attr, char *buff)    \
+ {                                                                     \
+       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);    \
+                                                                       \
+@@ -178,7 +178,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                  \
+ #define BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func)       \
+ ssize_t batadv_store_##_name(struct kobject *kobj,                    \
+-                           struct attribute *attr, char *buff,        \
++                           struct kobj_attribute *attr, char *buff,   \
+                            size_t count)                              \
+ {                                                                     \
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
+@@ -191,7 +191,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                 \
+ #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var)                                \
+ ssize_t batadv_show_##_name(struct kobject *kobj,                     \
+-                          struct attribute *attr, char *buff)         \
++                          struct kobj_attribute *attr, char *buff)    \
+ {                                                                     \
+       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);    \
+                                                                       \
+@@ -209,7 +209,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                  \
+ #define BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)                        \
+ ssize_t batadv_store_vlan_##_name(struct kobject *kobj,                       \
+-                                struct attribute *attr, char *buff,   \
++                                struct kobj_attribute *attr, char *buff,\
+                                 size_t count)                         \
+ {                                                                     \
+       struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+@@ -225,7 +225,7 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj,                    \
+ #define BATADV_ATTR_VLAN_SHOW_BOOL(_name)                             \
+ ssize_t batadv_show_vlan_##_name(struct kobject *kobj,                        \
+-                               struct attribute *attr, char *buff)    \
++                               struct kobj_attribute *attr, char *buff)\
+ {                                                                     \
+       struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+       struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+@@ -247,7 +247,7 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj,                     \
+ #define BATADV_ATTR_HIF_STORE_UINT(_name, _var, _min, _max, _post_func)       \
+ ssize_t batadv_store_##_name(struct kobject *kobj,                    \
+-                           struct attribute *attr, char *buff,        \
++                           struct kobj_attribute *attr, char *buff,   \
+                            size_t count)                              \
+ {                                                                     \
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
+@@ -268,7 +268,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                 \
+ #define BATADV_ATTR_HIF_SHOW_UINT(_name, _var)                                \
+ ssize_t batadv_show_##_name(struct kobject *kobj,                     \
+-                          struct attribute *attr, char *buff)         \
++                          struct kobj_attribute *attr, char *buff)            \
+ {                                                                     \
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
+       struct batadv_hard_iface *hard_iface;                           \
+@@ -338,13 +338,13 @@ static int batadv_store_bool_attr(char *buff, size_t count,
+ static inline ssize_t
+ __batadv_store_bool_attr(char *buff, size_t count,
+                        void (*post_func)(struct net_device *),
+-                       struct attribute *attr,
++                       struct kobj_attribute *attr,
+                        atomic_t *attr_store, struct net_device *net_dev)
+ {
+       bool changed;
+       int ret;
+-      ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
++      ret = batadv_store_bool_attr(buff, count, net_dev, attr->attr.name,
+                                    attr_store, &changed);
+       if (post_func && changed)
+               post_func(net_dev);
+@@ -393,13 +393,13 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
+ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
+                                       int min, int max,
+                                       void (*post_func)(struct net_device *),
+-                                      const struct attribute *attr,
++                                      const struct kobj_attribute *attr,
+                                       atomic_t *attr_store,
+                                       struct net_device *net_dev)
+ {
+       int ret;
+-      ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
++      ret = batadv_store_uint_attr(buff, count, net_dev, attr->attr.name, min, max,
+                                    attr_store);
+       if (post_func && ret)
+               post_func(net_dev);
+@@ -408,7 +408,7 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
+ }
+ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
+-                                  struct attribute *attr, char *buff)
++                                  struct kobj_attribute *attr, char *buff)
+ {
+       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+@@ -422,7 +422,7 @@ static void batadv_post_gw_reselect(struct net_device *net_dev)
+       batadv_gw_reselect(bat_priv);
+ }
+-static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
++static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct kobj_attribute *attr,
+                                  char *buff)
+ {
+       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+@@ -447,7 +447,7 @@ static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
+ }
+ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
+-                                  struct attribute *attr, char *buff,
++                                  struct kobj_attribute *attr, char *buff,
+                                   size_t count)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+@@ -515,7 +515,7 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
+ }
+ static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
+-                                   struct attribute *attr, char *buff)
++                                   struct kobj_attribute *attr, char *buff)
+ {
+       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+       u32 down, up;
+@@ -528,7 +528,7 @@ static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
+ }
+ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
+-                                    struct attribute *attr, char *buff,
++                                    struct kobj_attribute *attr, char *buff,
+                                     size_t count)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+@@ -549,7 +549,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
+  * error code in case of failure
+  */
+ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
+-                                        struct attribute *attr, char *buff)
++                                        struct kobj_attribute *attr, char *buff)
+ {
+       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+@@ -568,7 +568,7 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
+  * Return: 'count' on success or a negative error code in case of failure
+  */
+ static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
+-                                         struct attribute *attr, char *buff,
++                                         struct kobj_attribute *attr, char *buff,
+                                          size_t count)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+@@ -805,7 +805,7 @@ void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+ }
+ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
+-                                    struct attribute *attr, char *buff)
++                                    struct kobj_attribute *attr, char *buff)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+       struct batadv_hard_iface *hard_iface;
+@@ -829,7 +829,7 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
+ }
+ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
+-                                     struct attribute *attr, char *buff,
++                                     struct kobj_attribute *attr, char *buff,
+                                      size_t count)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+@@ -887,7 +887,7 @@ out:
+ }
+ static ssize_t batadv_show_iface_status(struct kobject *kobj,
+-                                      struct attribute *attr, char *buff)
++                                      struct kobj_attribute *attr, char *buff)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+       struct batadv_hard_iface *hard_iface;
+@@ -934,7 +934,7 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
+  * Return: 'count' on success or a negative error code in case of failure
+  */
+ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
+-                                              struct attribute *attr,
++                                              struct kobj_attribute *attr,
+                                               char *buff, size_t count)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+@@ -972,7 +972,7 @@ out:
+ }
+ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
+-                                             struct attribute *attr,
++                                             struct kobj_attribute *attr,
+                                              char *buff)
+ {
+       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
+index c76021b..3aef377 100644
+--- a/net/batman-adv/sysfs.h
++++ b/net/batman-adv/sysfs.h
+@@ -37,9 +37,9 @@ struct net_device;
+ struct batadv_attribute {
+       struct attribute attr;
+-      ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
++      ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+                       char *buf);
+-      ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
++      ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+                        char *buf, size_t count);
+ };
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 7e6df7a..474128b 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -664,7 +664,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+                  addr, BATADV_PRINT_VID(vid),
+-                 (u8)atomic_read(&bat_priv->tt.vn));
++                 (u8)atomic_read_unchecked(&bat_priv->tt.vn));
+       ether_addr_copy(tt_local->common.addr, addr);
+       /* The local entry has to be marked as NEW to avoid to send it in
+@@ -894,7 +894,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+       }
+       (*tt_data)->flags = BATADV_NO_FLAGS;
+-      (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
++      (*tt_data)->ttvn = atomic_read_unchecked(&bat_priv->tt.vn);
+       (*tt_data)->num_vlan = htons(num_vlan);
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+@@ -1011,7 +1011,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
+       seq_printf(seq,
+                  "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
+-                 net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
++                 net_dev->name, (u8)atomic_read_unchecked(&bat_priv->tt.vn));
+       seq_puts(seq,
+                "       Client         VID Flags    Last seen (CRC       )\n");
+@@ -2818,7 +2818,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+       spin_lock_bh(&bat_priv->tt.commit_lock);
+-      my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++      my_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+       req_ttvn = tt_data->ttvn;
+       orig_node = batadv_orig_hash_find(bat_priv, req_src);
+@@ -2857,7 +2857,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+                      bat_priv->tt.last_changeset_len);
+               spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+       } else {
+-              req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++              req_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+               /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+                * in the initial part
+@@ -3376,10 +3376,10 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
+       batadv_tt_local_update_crc(bat_priv);
+       /* Increment the TTVN only once per OGM interval */
+-      atomic_inc(&bat_priv->tt.vn);
++      atomic_inc_unchecked(&bat_priv->tt.vn);
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Local changes committed, updating to ttvn %u\n",
+-                 (u8)atomic_read(&bat_priv->tt.vn));
++                 (u8)atomic_read_unchecked(&bat_priv->tt.vn));
+       /* reset the sending counter */
+       atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index a64522c..168782d 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -84,7 +84,7 @@ enum batadv_dhcp_recipient {
+ struct batadv_hard_iface_bat_iv {
+       unsigned char *ogm_buff;
+       int ogm_buff_len;
+-      atomic_t ogm_seqno;
++      atomic_unchecked_t ogm_seqno;
+ };
+ /**
+@@ -633,7 +633,7 @@ enum batadv_counters {
+  * @work: work queue callback item for translation table purging
+  */
+ struct batadv_priv_tt {
+-      atomic_t vn;
++      atomic_unchecked_t vn;
+       atomic_t ogm_append_cnt;
+       atomic_t local_changes;
+       struct list_head changes_list;
+@@ -1042,7 +1042,7 @@ struct batadv_priv {
+       atomic_t bonding;
+       atomic_t fragmentation;
+       atomic_t packet_size_max;
+-      atomic_t frag_seqno;
++      atomic_unchecked_t frag_seqno;
+ #ifdef CONFIG_BATMAN_ADV_BLA
+       atomic_t bridge_loop_avoidance;
+ #endif
+@@ -1059,7 +1059,7 @@ struct batadv_priv {
+ #endif
+       u32 isolation_mark;
+       u32 isolation_mark_mask;
+-      atomic_t bcast_seqno;
++      atomic_unchecked_t bcast_seqno;
+       atomic_t bcast_queue_left;
+       atomic_t batman_queue_left;
+       char num_ifaces;
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 96f04b7..753db63 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -1482,7 +1482,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+                       uf.event_mask[1] = *((u32 *) f->event_mask + 1);
+               }
+-              len = min_t(unsigned int, len, sizeof(uf));
++              len = min((size_t)len, sizeof(uf));
+               if (copy_from_user(&uf, optval, len)) {
+                       err = -EFAULT;
+                       break;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index d4cad29b0..25c71a9 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3548,8 +3548,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+                       break;
+               case L2CAP_CONF_RFC:
+-                      if (olen == sizeof(rfc))
+-                              memcpy(&rfc, (void *)val, olen);
++                      if (olen != sizeof(rfc))
++                              break;
++
++                      memcpy(&rfc, (void *)val, olen);
+                       if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+                           rfc.mode != chan->mode)
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a8ba752..de24ce0 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+       struct sock *sk = sock->sk;
+       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+       struct l2cap_options opts;
+-      int len, err = 0;
++      int err = 0;
++      size_t len = optlen;
+       u32 opt;
+       BT_DBG("sk %p", sk);
+@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+               opts.max_tx   = chan->max_tx;
+               opts.txwin_size = chan->tx_win;
+-              len = min_t(unsigned int, sizeof(opts), optlen);
++              len = min(sizeof(opts), len);
+               if (copy_from_user((char *) &opts, optval, len)) {
+                       err = -EFAULT;
+                       break;
+@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+       struct bt_security sec;
+       struct bt_power pwr;
+       struct l2cap_conn *conn;
+-      int len, err = 0;
++      int err = 0;
++      size_t len = optlen;
+       u32 opt;
+       BT_DBG("sk %p", sk);
+@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+               sec.level = BT_SECURITY_LOW;
+-              len = min_t(unsigned int, sizeof(sec), optlen);
++              len = min(sizeof(sec), len);
+               if (copy_from_user((char *) &sec, optval, len)) {
+                       err = -EFAULT;
+                       break;
+@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+               pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+-              len = min_t(unsigned int, sizeof(pwr), optlen);
++              len = min(sizeof(pwr), len);
+               if (copy_from_user((char *) &pwr, optval, len)) {
+                       err = -EFAULT;
+                       break;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 7511df7..a670df3 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -690,7 +690,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+       struct sock *sk = sock->sk;
+       struct bt_security sec;
+       int err = 0;
+-      size_t len;
++      size_t len = optlen;
+       u32 opt;
+       BT_DBG("sk %p", sk);
+@@ -712,7 +712,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+               sec.level = BT_SECURITY_LOW;
+-              len = min_t(unsigned int, sizeof(sec), optlen);
++              len = min(sizeof(sec), len);
+               if (copy_from_user((char *) &sec, optval, len)) {
+                       err = -EFAULT;
+                       break;
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index 8e385a0..a5bdd8e 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+       BT_DBG("tty %p id %d", tty, tty->index);
+       BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+-             dev->channel, dev->port.count);
++             dev->channel, atomic_read(&dev->port.count));
+       err = tty_port_open(&dev->port, tty, filp);
+       if (err)
+@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
+       struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+       BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
+-                                              dev->port.count);
++                                              atomic_read(&dev->port.count));
+       tty_port_close(&dev->port, tty, filp);
+ }
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 77e7f69..6572d43 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -982,13 +982,13 @@ static void __net_exit brnf_exit_net(struct net *net)
+       brnet->enabled = false;
+ }
+-static struct pernet_operations brnf_net_ops __read_mostly = {
++static struct pernet_operations brnf_net_ops = {
+       .exit = brnf_exit_net,
+       .id   = &brnf_net_id,
+       .size = sizeof(struct brnf_net),
+ };
+-static struct notifier_block brnf_notifier __read_mostly = {
++static struct notifier_block brnf_notifier = {
+       .notifier_call = brnf_device_event,
+ };
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index f2a29e4..34963c3 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -1395,7 +1395,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
+       .get_link_af_size       = br_get_link_af_size_filtered,
+ };
+-struct rtnl_link_ops br_link_ops __read_mostly = {
++struct rtnl_link_ops br_link_ops = {
+       .kind                   = "bridge",
+       .priv_size              = sizeof(struct net_bridge),
+       .setup                  = br_dev_setup,
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 0833c25..c649cbf 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1547,7 +1547,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+                       tmp.valid_hooks = t->table->valid_hooks;
+               }
+               mutex_unlock(&ebt_mutex);
+-              if (copy_to_user(user, &tmp, *len) != 0) {
++              if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+                       BUGPRINT("c2u Didn't work\n");
+                       ret = -EFAULT;
+                       break;
+@@ -2351,7 +2351,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+                       goto out;
+               tmp.valid_hooks = t->valid_hooks;
+-              if (copy_to_user(user, &tmp, *len) != 0) {
++              if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+                       ret = -EFAULT;
+                       break;
+               }
+@@ -2362,7 +2362,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+               tmp.entries_size = t->table->entries_size;
+               tmp.valid_hooks = t->table->valid_hooks;
+-              if (copy_to_user(user, &tmp, *len) != 0) {
++              if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+                       ret = -EFAULT;
+                       break;
+               }
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index f5afda1..dcf770a 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -10,6 +10,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+ #include <linux/pkt_sched.h>
++#include <linux/sched.h>
+ #include <net/caif/caif_layer.h>
+ #include <net/caif/cfpkt.h>
+ #include <net/caif/cfctrl.h>
+@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
+       memset(&dev_info, 0, sizeof(dev_info));
+       dev_info.id = 0xff;
+       cfsrvl_init(&this->serv, 0, &dev_info, false);
+-      atomic_set(&this->req_seq_no, 1);
+-      atomic_set(&this->rsp_seq_no, 1);
++      atomic_set_unchecked(&this->req_seq_no, 1);
++      atomic_set_unchecked(&this->rsp_seq_no, 1);
+       this->serv.layer.receive = cfctrl_recv;
+       sprintf(this->serv.layer.name, "ctrl");
+       this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
+                             struct cfctrl_request_info *req)
+ {
+       spin_lock_bh(&ctrl->info_list_lock);
+-      atomic_inc(&ctrl->req_seq_no);
+-      req->sequence_no = atomic_read(&ctrl->req_seq_no);
++      atomic_inc_unchecked(&ctrl->req_seq_no);
++      req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
+       list_add_tail(&req->list, &ctrl->list);
+       spin_unlock_bh(&ctrl->info_list_lock);
+ }
+@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+                       if (p != first)
+                               pr_warn("Requests are not received in order\n");
+-                      atomic_set(&ctrl->rsp_seq_no,
++                      atomic_set_unchecked(&ctrl->rsp_seq_no,
+                                        p->sequence_no);
+                       list_del(&p->list);
+                       goto out;
+diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
+index 3408ed5..885aab5 100644
+--- a/net/caif/chnl_net.c
++++ b/net/caif/chnl_net.c
+@@ -213,7 +213,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
+       }
+ }
+-static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct chnl_net *priv;
+       struct cfpkt *pkt = NULL;
+@@ -514,7 +514,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
+ };
+-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
++static struct rtnl_link_ops ipcaif_link_ops = {
+       .kind           = "caif",
+       .priv_size      = sizeof(struct chnl_net),
+       .setup          = ipcaif_net_setup,
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 1108079..1871d16 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -890,7 +890,7 @@ static const struct net_proto_family can_family_ops = {
+ };
+ /* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
++static struct notifier_block can_netdev_notifier = {
+       .notifier_call = can_notifier,
+ };
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 8e999ff..684a43e 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1674,7 +1674,7 @@ static int __init bcm_module_init(void)
+       }
+       /* create /proc/net/can-bcm directory */
+-      proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
++      proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
+       return 0;
+ }
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 4551687..4e82e9b 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
+                "default: " __stringify(CGW_DEFAULT_HOPS) ")");
+ static HLIST_HEAD(cgw_list);
+-static struct notifier_block notifier;
+ static struct kmem_cache *cgw_cache __read_mostly;
+@@ -992,6 +991,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
+       return err;
+ }
++static struct notifier_block notifier = {
++      .notifier_call = cgw_notifier
++};
++
+ static __init int cgw_module_init(void)
+ {
+       /* sanitize given module parameter */
+@@ -1007,7 +1010,6 @@ static __init int cgw_module_init(void)
+               return -ENOMEM;
+       /* set notifier */
+-      notifier.notifier_call = cgw_notifier;
+       register_netdevice_notifier(&notifier);
+       if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
+diff --git a/net/can/proc.c b/net/can/proc.c
+index 85ef7bb..84c0fec 100644
+--- a/net/can/proc.c
++++ b/net/can/proc.c
+@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
+ void can_init_proc(void)
+ {
+       /* create /proc/net/can directory */
+-      can_dir = proc_mkdir("can", init_net.proc_net);
++      can_dir = proc_mkdir_restrict("can", init_net.proc_net);
+       if (!can_dir) {
+               pr_info("can: failed to create /proc/net/can.\n");
+diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
+index bddfcf6..36880cd 100644
+--- a/net/ceph/ceph_common.c
++++ b/net/ceph/ceph_common.c
+@@ -5,7 +5,7 @@
+ #include <linux/fs.h>
+ #include <linux/inet.h>
+ #include <linux/in6.h>
+-#include <linux/key.h>
++#include <linux/key-type.h>
+ #include <keys/ceph-type.h>
+ #include <linux/module.h>
+ #include <linux/mount.h>
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index a550289..218652a 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
+ #define MAX_ADDR_STR_LEN      64      /* 54 is enough */
+ static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
+-static atomic_t addr_str_seq = ATOMIC_INIT(0);
++static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
+ static struct page *zero_page;                /* used in certain error cases */
+@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
+       struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
+       struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
+-      i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
++      i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
+       s = addr_str[i];
+       switch (ss->ss_family) {
+diff --git a/net/compat.c b/net/compat.c
+index 1cd2ec0..2650ce6 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -58,7 +58,7 @@ int get_compat_msghdr(struct msghdr *kmsg,
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+-      kmsg->msg_control = compat_ptr(tmp3);
++      kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
+       if (save_addr)
+               *save_addr = compat_ptr(uaddr);
+@@ -98,20 +98,20 @@ int get_compat_msghdr(struct msghdr *kmsg,
+ #define CMSG_COMPAT_FIRSTHDR(msg)                     \
+       (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ?     \
+-       (struct compat_cmsghdr __user *)((msg)->msg_control) :         \
++       (struct compat_cmsghdr __force_user *)((msg)->msg_control) :           \
+        (struct compat_cmsghdr __user *)NULL)
+ #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
+       ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
+        (ucmlen) <= (unsigned long) \
+        ((mhdr)->msg_controllen - \
+-        ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
++        ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
+ static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
+               struct compat_cmsghdr __user *cmsg, int cmsg_len)
+ {
+       char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
+-      if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
++      if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
+                       msg->msg_controllen)
+               return NULL;
+       return (struct compat_cmsghdr __user *)ptr;
+@@ -201,7 +201,7 @@ Efault:
+ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
+ {
+-      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+       struct compat_cmsghdr cmhdr;
+       struct compat_timeval ctv;
+       struct compat_timespec cts[3];
+@@ -257,7 +257,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
+ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ {
+-      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++      struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+       int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
+       int fdnum = scm->fp->count;
+       struct file **fp = scm->fp->fp;
+@@ -358,7 +358,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
+               return -EFAULT;
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
++      err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
+       set_fs(old_fs);
+       return err;
+@@ -420,7 +420,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
+       len = sizeof(ktime);
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+-      err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
++      err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
+       set_fs(old_fs);
+       if (!err) {
+@@ -563,7 +563,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       case MCAST_JOIN_GROUP:
+       case MCAST_LEAVE_GROUP:
+       {
+-              struct compat_group_req __user *gr32 = (void *)optval;
++              struct compat_group_req __user *gr32 = (void __user *)optval;
+               struct group_req __user *kgr =
+                       compat_alloc_user_space(sizeof(struct group_req));
+               u32 interface;
+@@ -584,7 +584,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       case MCAST_BLOCK_SOURCE:
+       case MCAST_UNBLOCK_SOURCE:
+       {
+-              struct compat_group_source_req __user *gsr32 = (void *)optval;
++              struct compat_group_source_req __user *gsr32 = (void __user *)optval;
+               struct group_source_req __user *kgsr = compat_alloc_user_space(
+                       sizeof(struct group_source_req));
+               u32 interface;
+@@ -605,7 +605,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       }
+       case MCAST_MSFILTER:
+       {
+-              struct compat_group_filter __user *gf32 = (void *)optval;
++              struct compat_group_filter __user *gf32 = (void __user *)optval;
+               struct group_filter __user *kgf;
+               u32 interface, fmode, numsrc;
+@@ -643,7 +643,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
+       char __user *optval, int __user *optlen,
+       int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
+ {
+-      struct compat_group_filter __user *gf32 = (void *)optval;
++      struct compat_group_filter __user *gf32 = (void __user *)optval;
+       struct group_filter __user *kgf;
+       int __user      *koptlen;
+       u32 interface, fmode, numsrc;
+@@ -787,7 +787,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
+       if (call < SYS_SOCKET || call > SYS_SENDMMSG)
+               return -EINVAL;
+-      if (copy_from_user(a, args, nas[call]))
++      if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
+               return -EFAULT;
+       a0 = a[0];
+       a1 = a[1];
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index b7de71f..808387d 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -360,7 +360,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+       }
+       kfree_skb(skb);
+-      atomic_inc(&sk->sk_drops);
++      atomic_inc_unchecked(&sk->sk_drops);
+       sk_mem_reclaim_partial(sk);
+       return err;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ea63120..7fbab94 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1768,7 +1768,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ {
+       if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+           unlikely(!is_skb_forwardable(dev, skb))) {
+-              atomic_long_inc(&dev->rx_dropped);
++              atomic_long_inc_unchecked(&dev->rx_dropped);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+@@ -3005,7 +3005,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
+ out_kfree_skb:
+       kfree_skb(skb);
+ out_null:
+-      atomic_long_inc(&dev->tx_dropped);
++      atomic_long_inc_unchecked(&dev->tx_dropped);
+       return NULL;
+ }
+@@ -3425,7 +3425,7 @@ recursion_alert:
+       rc = -ENETDOWN;
+       rcu_read_unlock_bh();
+-      atomic_long_inc(&dev->tx_dropped);
++      atomic_long_inc_unchecked(&dev->tx_dropped);
+       kfree_skb_list(skb);
+       return rc;
+ out:
+@@ -3778,7 +3778,7 @@ drop:
+       local_irq_restore(flags);
+-      atomic_long_inc(&skb->dev->rx_dropped);
++      atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+       kfree_skb(skb);
+       return NET_RX_DROP;
+ }
+@@ -3855,7 +3855,7 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+-static void net_tx_action(struct softirq_action *h)
++static __latent_entropy void net_tx_action(void)
+ {
+       struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+@@ -4218,9 +4218,9 @@ ncls:
+       } else {
+ drop:
+               if (!deliver_exact)
+-                      atomic_long_inc(&skb->dev->rx_dropped);
++                      atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+               else
+-                      atomic_long_inc(&skb->dev->rx_nohandler);
++                      atomic_long_inc_unchecked(&skb->dev->rx_nohandler);
+               kfree_skb(skb);
+               /* Jamal, now you will not able to escape explaining
+                * me how you were going to use this. :-)
+@@ -5187,7 +5187,7 @@ out_unlock:
+       return work;
+ }
+-static void net_rx_action(struct softirq_action *h)
++static __latent_entropy void net_rx_action(void)
+ {
+       struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+       unsigned long time_limit = jiffies + 2;
+@@ -7520,9 +7520,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+       } else {
+               netdev_stats_to_stats64(storage, &dev->stats);
+       }
+-      storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
+-      storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+-      storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
++      storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
++      storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
++      storage->rx_nohandler += atomic_long_read_unchecked(&dev->rx_nohandler);
+       return storage;
+ }
+ EXPORT_SYMBOL(dev_get_stats);
+@@ -8144,7 +8144,7 @@ static void __net_exit netdev_exit(struct net *net)
+       kfree(net->dev_index_head);
+ }
+-static struct pernet_operations __net_initdata netdev_net_ops = {
++static struct pernet_operations __net_initconst netdev_net_ops = {
+       .init = netdev_init,
+       .exit = netdev_exit,
+ };
+@@ -8244,7 +8244,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+       rtnl_unlock();
+ }
+-static struct pernet_operations __net_initdata default_device_ops = {
++static struct pernet_operations __net_initconst default_device_ops = {
+       .exit = default_device_exit,
+       .exit_batch = default_device_exit_batch,
+ };
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index b94b1d2..da3ed7c 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
+       no_module = !dev;
+       if (no_module && capable(CAP_NET_ADMIN))
+               no_module = request_module("netdev-%s", name);
+-      if (no_module && capable(CAP_SYS_MODULE))
++      if (no_module && capable(CAP_SYS_MODULE)) {
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++              ___request_module(true, "grsec_modharden_netdev", "%s", name);
++#else
+               request_module("%s", name);
++#endif
++      }
+ }
+ EXPORT_SYMBOL(dev_load);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index cb06ace..3cab3fc 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -596,7 +596,11 @@ do_pass:
+               /* Unknown instruction. */
+               default:
+-                      goto err;
++                      WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
++                                     fp->code, fp->jt, fp->jf, fp->k);
++                      kfree(addrs);
++                      BUG();
++                      return -EINVAL;
+               }
+               insn++;
+@@ -640,7 +644,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
+       u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
+       int pc, ret = 0;
+-      BUILD_BUG_ON(BPF_MEMWORDS > 16);
++      BUILD_BUG_ON(BPF_MEMWORDS != 16);
+       masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
+       if (!masks)
+@@ -1086,7 +1090,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
+       if (!fp)
+               return -ENOMEM;
+-      memcpy(fp->insns, fprog->filter, fsize);
++      memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
+       fp->len = fprog->len;
+       /* Since unattached filters are not copied back to user
+diff --git a/net/core/flow.c b/net/core/flow.c
+index 3937b1b..b18d1cb 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
+ static int flow_entry_valid(struct flow_cache_entry *fle,
+                               struct netns_xfrm *xfrm)
+ {
+-      if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
++      if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
+               return 0;
+       if (fle->object && !fle->object->ops->check(fle->object))
+               return 0;
+@@ -238,7 +238,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
+               if (fcp->hash_count > 2 * fc->high_watermark ||
+                   atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+-                      atomic_inc(&net->xfrm.flow_cache_genid);
++                      atomic_inc_unchecked(&net->xfrm.flow_cache_genid);
+                       flo = ERR_PTR(-ENOBUFS);
+                       goto ret_object;
+               }
+@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
+                       hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+                       fcp->hash_count++;
+               }
+-      } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
++      } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
+               flo = fle->object;
+               if (!flo)
+                       goto ret_object;
+@@ -274,7 +274,7 @@ nocache:
+       }
+       flo = resolver(net, key, family, dir, flo, ctx);
+       if (fle) {
+-              fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
++              fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
+               if (!IS_ERR(flo))
+                       fle->object = flo;
+               else
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index cf26e04c4..e70ca13 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -860,7 +860,7 @@ static void neigh_probe(struct neighbour *neigh)
+               skb = skb_clone(skb, GFP_ATOMIC);
+       write_unlock(&neigh->lock);
+       neigh->ops->solicit(neigh, skb);
+-      atomic_inc(&neigh->probes);
++      atomic_inc_unchecked(&neigh->probes);
+       kfree_skb(skb);
+ }
+@@ -916,7 +916,7 @@ static void neigh_timer_handler(unsigned long arg)
+                       neigh_dbg(2, "neigh %p is probed\n", neigh);
+                       neigh->nud_state = NUD_PROBE;
+                       neigh->updated = jiffies;
+-                      atomic_set(&neigh->probes, 0);
++                      atomic_set_unchecked(&neigh->probes, 0);
+                       notify = 1;
+                       next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+               }
+@@ -926,7 +926,7 @@ static void neigh_timer_handler(unsigned long arg)
+       }
+       if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+-          atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
++          atomic_read_unchecked(&neigh->probes) >= neigh_max_probes(neigh)) {
+               neigh->nud_state = NUD_FAILED;
+               notify = 1;
+               neigh_invalidate(neigh);
+@@ -970,7 +970,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+                   NEIGH_VAR(neigh->parms, APP_PROBES)) {
+                       unsigned long next, now = jiffies;
+-                      atomic_set(&neigh->probes,
++                      atomic_set_unchecked(&neigh->probes,
+                                  NEIGH_VAR(neigh->parms, UCAST_PROBES));
+                       neigh->nud_state     = NUD_INCOMPLETE;
+                       neigh->updated = now;
+@@ -1156,7 +1156,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+       if (new != old) {
+               neigh_del_timer(neigh);
+               if (new & NUD_PROBE)
+-                      atomic_set(&neigh->probes, 0);
++                      atomic_set_unchecked(&neigh->probes, 0);
+               if (new & NUD_IN_TIMER)
+                       neigh_add_timer(neigh, (jiffies +
+                                               ((new & NUD_REACHABLE) ?
+@@ -1244,7 +1244,7 @@ void __neigh_set_probe_once(struct neighbour *neigh)
+       if (!(neigh->nud_state & NUD_FAILED))
+               return;
+       neigh->nud_state = NUD_INCOMPLETE;
+-      atomic_set(&neigh->probes, neigh_max_probes(neigh));
++      atomic_set_unchecked(&neigh->probes, neigh_max_probes(neigh));
+       neigh_add_timer(neigh,
+                       jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+ }
+@@ -2184,7 +2184,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
+       ci.ndm_refcnt    = atomic_read(&neigh->refcnt) - 1;
+       read_unlock_bh(&neigh->lock);
+-      if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
++      if (nla_put_u32(skb, NDA_PROBES, atomic_read_unchecked(&neigh->probes)) ||
+           nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
+@@ -2872,7 +2872,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+       int size, ret;
+-      struct ctl_table tmp = *ctl;
++      ctl_table_no_const tmp = *ctl;
+       tmp.extra1 = &zero;
+       tmp.extra2 = &unres_qlen_max;
+@@ -2935,7 +2935,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
+                                          void __user *buffer,
+                                          size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table tmp = *ctl;
++      ctl_table_no_const tmp = *ctl;
+       int ret;
+       tmp.extra1 = &zero;
+diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
+index 14d0934..f2a895f 100644
+--- a/net/core/net-procfs.c
++++ b/net/core/net-procfs.c
+@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
+       struct rtnl_link_stats64 temp;
+       const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+-      seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
++      if (gr_proc_is_restricted())
++              seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
++                 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
++                 dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
++                 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
++      else
++              seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+                  "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
+                  dev->name, stats->rx_bytes, stats->rx_packets,
+                  stats->rx_errors,
+@@ -167,7 +173,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
+       return 0;
+ }
+-static const struct seq_operations dev_seq_ops = {
++const struct seq_operations dev_seq_ops = {
+       .start = dev_seq_start,
+       .next  = dev_seq_next,
+       .stop  = dev_seq_stop,
+@@ -197,7 +203,7 @@ static const struct seq_operations softnet_seq_ops = {
+ static int softnet_seq_open(struct inode *inode, struct file *file)
+ {
+-      return seq_open(file, &softnet_seq_ops);
++      return seq_open_restrict(file, &softnet_seq_ops);
+ }
+ static const struct file_operations softnet_seq_fops = {
+@@ -284,8 +290,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
+               else
+                       seq_printf(seq, "%04x", ntohs(pt->type));
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++              seq_printf(seq, " %-8s %pf\n",
++                         pt->dev ? pt->dev->name : "", NULL);
++#else
+               seq_printf(seq, " %-8s %pf\n",
+                          pt->dev ? pt->dev->name : "", pt->func);
++#endif
+       }
+       return 0;
+@@ -348,7 +359,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
+       remove_proc_entry("dev", net->proc_net);
+ }
+-static struct pernet_operations __net_initdata dev_proc_ops = {
++static struct pernet_operations __net_initconst dev_proc_ops = {
+       .init = dev_proc_net_init,
+       .exit = dev_proc_net_exit,
+ };
+@@ -410,7 +421,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
+       remove_proc_entry("dev_mcast", net->proc_net);
+ }
+-static struct pernet_operations __net_initdata dev_mc_net_ops = {
++static struct pernet_operations __net_initconst dev_mc_net_ops = {
+       .init = dev_mc_net_init,
+       .exit = dev_mc_net_exit,
+ };
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 6e4f347..8eff663 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -290,7 +290,7 @@ static ssize_t carrier_changes_show(struct device *dev,
+ {
+       struct net_device *netdev = to_net_dev(dev);
+       return sprintf(buf, fmt_dec,
+-                     atomic_read(&netdev->carrier_changes));
++                     atomic_read_unchecked(&netdev->carrier_changes));
+ }
+ static DEVICE_ATTR_RO(carrier_changes);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 2c2eb1b..2f3b518 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -526,7 +526,7 @@ static __net_exit void net_ns_net_exit(struct net *net)
+       ns_free_inum(&net->ns);
+ }
+-static struct pernet_operations __net_initdata net_ns_ops = {
++static struct pernet_operations __net_initconst net_ns_ops = {
+       .init = net_ns_net_init,
+       .exit = net_ns_net_exit,
+ };
+@@ -775,7 +775,7 @@ static int __register_pernet_operations(struct list_head *list,
+       int error;
+       LIST_HEAD(net_exit_list);
+-      list_add_tail(&ops->list, list);
++      pax_list_add_tail((struct list_head *)&ops->list, list);
+       if (ops->init || (ops->id && ops->size)) {
+               for_each_net(net) {
+                       error = ops_init(ops, net);
+@@ -788,7 +788,7 @@ static int __register_pernet_operations(struct list_head *list,
+ out_undo:
+       /* If I have an error cleanup all namespaces I initialized */
+-      list_del(&ops->list);
++      pax_list_del((struct list_head *)&ops->list);
+       ops_exit_list(ops, &net_exit_list);
+       ops_free_list(ops, &net_exit_list);
+       return error;
+@@ -799,7 +799,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
+       struct net *net;
+       LIST_HEAD(net_exit_list);
+-      list_del(&ops->list);
++      pax_list_del((struct list_head *)&ops->list);
+       for_each_net(net)
+               list_add_tail(&net->exit_list, &net_exit_list);
+       ops_exit_list(ops, &net_exit_list);
+@@ -933,7 +933,7 @@ int register_pernet_device(struct pernet_operations *ops)
+       mutex_lock(&net_mutex);
+       error = register_pernet_operations(&pernet_list, ops);
+       if (!error && (first_device == &pernet_list))
+-              first_device = &ops->list;
++              first_device = (struct list_head *)&ops->list;
+       mutex_unlock(&net_mutex);
+       return error;
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 53599bd..cbd0b29 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+       struct udphdr *udph;
+       struct iphdr *iph;
+       struct ethhdr *eth;
+-      static atomic_t ip_ident;
++      static atomic_unchecked_t ip_ident;
+       struct ipv6hdr *ip6h;
+       WARN_ON_ONCE(!irqs_disabled());
+@@ -455,7 +455,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+               put_unaligned(0x45, (unsigned char *)iph);
+               iph->tos      = 0;
+               put_unaligned(htons(ip_len), &(iph->tot_len));
+-              iph->id       = htons(atomic_inc_return(&ip_ident));
++              iph->id       = htons(atomic_inc_return_unchecked(&ip_ident));
+               iph->frag_off = 0;
+               iph->ttl      = 64;
+               iph->protocol = IPPROTO_UDP;
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index bbd118b..c1c33449 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3865,7 +3865,7 @@ static int __net_init pg_net_init(struct net *net)
+       pn->net = net;
+       INIT_LIST_HEAD(&pn->pktgen_threads);
+       pn->pktgen_exiting = false;
+-      pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
++      pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
+       if (!pn->proc_dir) {
+               pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
+               return -ENODEV;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 189cc78..d76c934 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -61,7 +61,7 @@ struct rtnl_link {
+       rtnl_doit_func          doit;
+       rtnl_dumpit_func        dumpit;
+       rtnl_calcit_func        calcit;
+-};
++} __no_const;
+ static DEFINE_MUTEX(rtnl_mutex);
+@@ -329,10 +329,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
+        * to use the ops for creating device. So do not
+        * fill up dellink as well. That disables rtnl_dellink.
+        */
+-      if (ops->setup && !ops->dellink)
+-              ops->dellink = unregister_netdevice_queue;
++      if (ops->setup && !ops->dellink) {
++              pax_open_kernel();
++              const_cast(ops->dellink) = unregister_netdevice_queue;
++              pax_close_kernel();
++      }
+-      list_add_tail(&ops->list, &link_ops);
++      pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_register);
+@@ -379,7 +382,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
+       for_each_net(net) {
+               __rtnl_kill_links(net, ops);
+       }
+-      list_del(&ops->list);
++      pax_list_del((struct list_head *)&ops->list);
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+@@ -1296,7 +1299,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+           (dev->ifalias &&
+            nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
+           nla_put_u32(skb, IFLA_CARRIER_CHANGES,
+-                      atomic_read(&dev->carrier_changes)) ||
++                      atomic_read_unchecked(&dev->carrier_changes)) ||
+           nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
+               goto nla_put_failure;
+@@ -3829,7 +3832,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+               __rtnl_unlock();
+               rtnl = net->rtnl;
+               {
+-                      struct netlink_dump_control c = {
++                      netlink_dump_control_no_const c = {
+                               .dump           = dumpit,
+                               .min_dump_alloc = min_dump_alloc,
+                       };
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 2696aef..dbd5807 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -215,9 +215,9 @@ EXPORT_SYMBOL(__scm_send);
+ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ {
+       struct cmsghdr __user *cm
+-              = (__force struct cmsghdr __user *)msg->msg_control;
++              = (struct cmsghdr __force_user *)msg->msg_control;
+       struct cmsghdr cmhdr;
+-      int cmlen = CMSG_LEN(len);
++      size_t cmlen = CMSG_LEN(len);
+       int err;
+       if (MSG_CMSG_COMPAT & msg->msg_flags)
+@@ -238,7 +238,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+       err = -EFAULT;
+       if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
+               goto out;
+-      if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
++      if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
+               goto out;
+       cmlen = CMSG_SPACE(len);
+       if (msg->msg_controllen < cmlen)
+@@ -254,7 +254,7 @@ EXPORT_SYMBOL(put_cmsg);
+ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ {
+       struct cmsghdr __user *cm
+-              = (__force struct cmsghdr __user*)msg->msg_control;
++              = (struct cmsghdr __force_user *)msg->msg_control;
+       int fdmax = 0;
+       int fdnum = scm->fp->count;
+@@ -274,7 +274,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+       if (fdnum < fdmax)
+               fdmax = fdnum;
+-      for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
++      for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
+            i++, cmfptr++)
+       {
+               struct socket *sock;
+@@ -303,7 +303,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+       if (i > 0)
+       {
+-              int cmlen = CMSG_LEN(i*sizeof(int));
++              size_t cmlen = CMSG_LEN(i*sizeof(int));
+               err = put_user(SOL_SOCKET, &cm->cmsg_level);
+               if (!err)
+                       err = put_user(SCM_RIGHTS, &cm->cmsg_type);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 3864b4b6..d2cbe83 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1047,7 +1047,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += off;
+       /* {transport,network,mac}_header and tail are relative to skb->head */
+-      skb->transport_header += off;
++      if (skb_transport_header_was_set(skb))
++              skb->transport_header += off;
+       skb->network_header   += off;
+       if (skb_mac_header_was_set(skb))
+               skb->mac_header += off;
+@@ -2174,7 +2175,7 @@ EXPORT_SYMBOL(__skb_checksum);
+ __wsum skb_checksum(const struct sk_buff *skb, int offset,
+                   int len, __wsum csum)
+ {
+-      const struct skb_checksum_ops ops = {
++      static const struct skb_checksum_ops ops = {
+               .update  = csum_partial_ext,
+               .combine = csum_block_add_ext,
+       };
+@@ -3432,12 +3433,14 @@ void __init skb_init(void)
+       skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+                                             sizeof(struct sk_buff),
+                                             0,
+-                                            SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++                                            SLAB_HWCACHE_ALIGN|SLAB_PANIC|
++                                            SLAB_NO_SANITIZE,
+                                             NULL);
+       skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+                                               sizeof(struct sk_buff_fclones),
+                                               0,
+-                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC|
++                                              SLAB_NO_SANITIZE,
+                                               NULL);
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index fd7b41e..71dae11 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -411,13 +411,13 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+       struct sk_buff_head *list = &sk->sk_receive_queue;
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               trace_sock_rcvqueue_full(sk, skb);
+               return -ENOMEM;
+       }
+       if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               return -ENOBUFS;
+       }
+@@ -463,7 +463,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+       skb->dev = NULL;
+       if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               goto discard_and_relse;
+       }
+       if (nested)
+@@ -481,7 +481,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+               mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+               bh_unlock_sock(sk);
+-              atomic_inc(&sk->sk_drops);
++              atomic_inc_unchecked(&sk->sk_drops);
+               goto discard_and_relse;
+       }
+@@ -889,19 +889,6 @@ set_rcvbuf:
+               }
+               break;
+-      case SO_ATTACH_BPF:
+-              ret = -EINVAL;
+-              if (optlen == sizeof(u32)) {
+-                      u32 ufd;
+-
+-                      ret = -EFAULT;
+-                      if (copy_from_user(&ufd, optval, sizeof(ufd)))
+-                              break;
+-
+-                      ret = sk_attach_bpf(ufd, sk);
+-              }
+-              break;
+-
+       case SO_ATTACH_REUSEPORT_CBPF:
+               ret = -EINVAL;
+               if (optlen == sizeof(struct sock_fprog)) {
+@@ -915,6 +902,20 @@ set_rcvbuf:
+               }
+               break;
++#ifndef GRKERNSEC_BPF_HARDEN
++      case SO_ATTACH_BPF:
++              ret = -EINVAL;
++              if (optlen == sizeof(u32)) {
++                      u32 ufd;
++
++                      ret = -EFAULT;
++                      if (copy_from_user(&ufd, optval, sizeof(ufd)))
++                              break;
++
++                      ret = sk_attach_bpf(ufd, sk);
++              }
++              break;
++
+       case SO_ATTACH_REUSEPORT_EBPF:
+               ret = -EINVAL;
+               if (optlen == sizeof(u32)) {
+@@ -928,6 +929,8 @@ set_rcvbuf:
+               }
+               break;
++#endif
++
+       case SO_DETACH_FILTER:
+               ret = sk_detach_filter(sk);
+               break;
+@@ -1037,12 +1040,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+               struct timeval tm;
+       } v;
+-      int lv = sizeof(int);
+-      int len;
++      unsigned int lv = sizeof(int);
++      unsigned int len;
+       if (get_user(len, optlen))
+               return -EFAULT;
+-      if (len < 0)
++      if (len > INT_MAX)
+               return -EINVAL;
+       memset(&v, 0, sizeof(v));
+@@ -1180,11 +1183,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+       case SO_PEERNAME:
+       {
+-              char address[128];
++              char address[_K_SS_MAXSIZE];
+               if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
+                       return -ENOTCONN;
+-              if (lv < len)
++              if (lv < len || sizeof address < len)
+                       return -EINVAL;
+               if (copy_to_user(optval, address, len))
+                       return -EFAULT;
+@@ -1272,7 +1275,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+       if (len > lv)
+               len = lv;
+-      if (copy_to_user(optval, &v, len))
++      if (len > sizeof(v) || copy_to_user(optval, &v, len))
+               return -EFAULT;
+ lenout:
+       if (put_user(len, optlen))
+@@ -1536,7 +1539,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+               newsk->sk_dst_cache     = NULL;
+               newsk->sk_wmem_queued   = 0;
+               newsk->sk_forward_alloc = 0;
+-              atomic_set(&newsk->sk_drops, 0);
++              atomic_set_unchecked(&newsk->sk_drops, 0);
+               newsk->sk_send_head     = NULL;
+               newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+@@ -1565,7 +1568,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+               newsk->sk_err      = 0;
+               newsk->sk_priority = 0;
+               newsk->sk_incoming_cpu = raw_smp_processor_id();
+-              atomic64_set(&newsk->sk_cookie, 0);
++              atomic64_set_unchecked(&newsk->sk_cookie, 0);
+               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+@@ -2497,7 +2500,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+        */
+       smp_wmb();
+       atomic_set(&sk->sk_refcnt, 1);
+-      atomic_set(&sk->sk_drops, 0);
++      atomic_set_unchecked(&sk->sk_drops, 0);
+ }
+ EXPORT_SYMBOL(sock_init_data);
+@@ -2621,6 +2624,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
+ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+                      int level, int type)
+ {
++      struct sock_extended_err ee;
+       struct sock_exterr_skb *serr;
+       struct sk_buff *skb;
+       int copied, err;
+@@ -2642,7 +2646,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+       sock_recv_timestamp(msg, sk, skb);
+       serr = SKB_EXT_ERR(skb);
+-      put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
++      ee = serr->ee;
++      put_cmsg(msg, level, type, sizeof ee, &ee);
+       msg->msg_flags |= MSG_ERRQUEUE;
+       err = copied;
+@@ -3094,7 +3099,7 @@ static __net_exit void proto_exit_net(struct net *net)
+ }
+-static __net_initdata struct pernet_operations proto_net_ops = {
++static __net_initconst struct pernet_operations proto_net_ops = {
+       .init = proto_init_net,
+       .exit = proto_exit_net,
+ };
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index 6b10573..af9e62e 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -14,7 +14,7 @@
+ #include <linux/inet_diag.h>
+ #include <linux/sock_diag.h>
+-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
++static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
+ static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+ static DEFINE_MUTEX(sock_diag_table_mutex);
+ static struct workqueue_struct *broadcast_wq;
+@@ -22,12 +22,12 @@ static struct workqueue_struct *broadcast_wq;
+ static u64 sock_gen_cookie(struct sock *sk)
+ {
+       while (1) {
+-              u64 res = atomic64_read(&sk->sk_cookie);
++              u64 res = atomic64_read_unchecked(&sk->sk_cookie);
+               if (res)
+                       return res;
+-              res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
+-              atomic64_cmpxchg(&sk->sk_cookie, 0, res);
++              res = atomic64_inc_return_unchecked(&sock_net(sk)->cookie_gen);
++              atomic64_cmpxchg_unchecked(&sk->sk_cookie, 0, res);
+       }
+ }
+@@ -67,7 +67,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
+       mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+       mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+       mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+-      mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
++      mem[SK_MEMINFO_DROPS] = atomic_read_unchecked(&sk->sk_drops);
+       return nla_put(skb, attrtype, sizeof(mem), &mem);
+ }
+@@ -193,8 +193,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
+       mutex_lock(&sock_diag_table_mutex);
+       if (sock_diag_handlers[hndl->family])
+               err = -EBUSY;
+-      else
++      else {
++              pax_open_kernel();
+               sock_diag_handlers[hndl->family] = hndl;
++              pax_close_kernel();
++      }
+       mutex_unlock(&sock_diag_table_mutex);
+       return err;
+@@ -210,7 +213,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
+       mutex_lock(&sock_diag_table_mutex);
+       BUG_ON(sock_diag_handlers[family] != hnld);
++      pax_open_kernel();
+       sock_diag_handlers[family] = NULL;
++      pax_close_kernel();
+       mutex_unlock(&sock_diag_table_mutex);
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_unregister);
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 0df2aa6..7db59f7 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+ {
+       unsigned int orig_size, size;
+       int ret, i;
+-      struct ctl_table tmp = {
++      ctl_table_no_const tmp = {
+               .data = &size,
+               .maxlen = sizeof(size),
+               .mode = table->mode
+@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
+                            void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+       char id[IFNAMSIZ];
+-      struct ctl_table tbl = {
++      ctl_table_no_const tbl = {
+               .data = id,
+               .maxlen = IFNAMSIZ,
+       };
+@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
+ static int proc_do_rss_key(struct ctl_table *table, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-      struct ctl_table fake_table;
++      ctl_table_no_const fake_table;
+       char buf[NETDEV_RSS_KEY_LEN * 3];
+       snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
+@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
+               .mode           = 0444,
+               .proc_handler   = proc_do_rss_key,
+       },
+-#ifdef CONFIG_BPF_JIT
++#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
+       {
+               .procname       = "bpf_jit_enable",
+               .data           = &bpf_jit_enable,
+@@ -428,13 +428,12 @@ static struct ctl_table netns_core_table[] = {
+ static __net_init int sysctl_core_net_init(struct net *net)
+ {
+-      struct ctl_table *tbl;
++      ctl_table_no_const *tbl = NULL;
+       net->core.sysctl_somaxconn = SOMAXCONN;
+-      tbl = netns_core_table;
+       if (!net_eq(net, &init_net)) {
+-              tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
++              tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
+               if (tbl == NULL)
+                       goto err_dup;
+@@ -444,17 +443,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
+               if (net->user_ns != &init_user_ns) {
+                       tbl[0].procname = NULL;
+               }
+-      }
+-
+-      net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
++              net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
++      } else
++              net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
+       if (net->core.sysctl_hdr == NULL)
+               goto err_reg;
+       return 0;
+ err_reg:
+-      if (tbl != netns_core_table)
+-              kfree(tbl);
++      kfree(tbl);
+ err_dup:
+       return -ENOMEM;
+ }
+@@ -469,7 +467,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
+       kfree(tbl);
+ }
+-static __net_initdata struct pernet_operations sysctl_core_ops = {
++static __net_initconst struct pernet_operations sysctl_core_ops = {
+       .init = sysctl_core_net_init,
+       .exit = sysctl_core_net_exit,
+ };
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 13d6b1a..eaa0cee 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -1524,7 +1524,12 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
+       struct linkinfo_dn link;
+       unsigned int r_len;
+       void *r_data = NULL;
+-      unsigned int val;
++      struct optdata_dn opt;
++      struct accessdata_dn acc;
++      u8 mode;
++      int val;
++      unsigned long window;
++      unsigned char rem;
+       if(get_user(r_len , optlen))
+               return -EFAULT;
+@@ -1533,25 +1538,29 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
+       case DSO_CONDATA:
+               if (r_len > sizeof(struct optdata_dn))
+                       r_len = sizeof(struct optdata_dn);
+-              r_data = &scp->conndata_in;
++              opt = scp->conndata_in;
++              r_data = &opt;
+               break;
+       case DSO_DISDATA:
+               if (r_len > sizeof(struct optdata_dn))
+                       r_len = sizeof(struct optdata_dn);
+-              r_data = &scp->discdata_in;
++              opt = scp->discdata_in;
++              r_data = &opt;
+               break;
+       case DSO_CONACCESS:
+               if (r_len > sizeof(struct accessdata_dn))
+                       r_len = sizeof(struct accessdata_dn);
+-              r_data = &scp->accessdata;
++              acc = scp->accessdata;
++              r_data = &acc;
+               break;
+       case DSO_ACCEPTMODE:
+               if (r_len > sizeof(unsigned char))
+                       r_len = sizeof(unsigned char);
+-              r_data = &scp->accept_mode;
++              mode = scp->accept_mode;
++              r_data = &mode;
+               break;
+       case DSO_LINKINFO:
+@@ -1601,7 +1610,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
+       case DSO_MAXWINDOW:
+               if (r_len > sizeof(unsigned long))
+                       r_len = sizeof(unsigned long);
+-              r_data = &scp->max_window;
++              window = scp->max_window;
++              r_data = &window;
+               break;
+       case DSO_NODELAY:
+@@ -1621,13 +1631,15 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
+       case DSO_SERVICES:
+               if (r_len > sizeof(unsigned char))
+                       r_len = sizeof(unsigned char);
+-              r_data = &scp->services_rem;
++              rem = scp->services_rem;
++              r_data = &rem;
+               break;
+       case DSO_INFO:
+               if (r_len > sizeof(unsigned char))
+                       r_len = sizeof(unsigned char);
+-              r_data = &scp->info_rem;
++              rem = scp->info_rem;
++              r_data = &rem;
+               break;
+       }
+diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
+index b2c26b0..41f803e 100644
+--- a/net/decnet/dn_dev.c
++++ b/net/decnet/dn_dev.c
+@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
+               .extra1 = &min_t3,
+               .extra2 = &max_t3
+       },
+-      {0}
++      { }
+       },
+ };
+diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
+index 5325b54..a0d4d69 100644
+--- a/net/decnet/sysctl_net_decnet.c
++++ b/net/decnet/sysctl_net_decnet.c
+@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
+       if (len > *lenp) len = *lenp;
+-      if (copy_to_user(buffer, addr, len))
++      if (len > sizeof addr || copy_to_user(buffer, addr, len))
+               return -EFAULT;
+       *lenp = len;
+@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
+       if (len > *lenp) len = *lenp;
+-      if (copy_to_user(buffer, devname, len))
++      if (len > sizeof devname || copy_to_user(buffer, devname, len))
+               return -EFAULT;
+       *lenp = len;
+diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
+index 7e68bc6..09a6073 100644
+--- a/net/dsa/dsa.c
++++ b/net/dsa/dsa.c
+@@ -269,7 +269,7 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
+ int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds)
+ {
+       struct net_device *master;
+-      struct ethtool_ops *cpu_ops;
++      ethtool_ops_no_const *cpu_ops;
+       master = ds->dst->master_netdev;
+       if (ds->master_netdev)
+@@ -1045,7 +1045,7 @@ static struct packet_type dsa_pack_type __read_mostly = {
+       .func   = dsa_switch_rcv,
+ };
+-static struct notifier_block dsa_netdevice_nb __read_mostly = {
++static struct notifier_block dsa_netdevice_nb = {
+       .notifier_call  = dsa_slave_netdevice_event,
+ };
+diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
+index 00077a9..c513046 100644
+--- a/net/dsa/dsa_priv.h
++++ b/net/dsa/dsa_priv.h
+@@ -60,7 +60,7 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
+ /* slave.c */
+ extern const struct dsa_device_ops notag_netdev_ops;
+ void dsa_slave_mii_bus_init(struct dsa_switch *ds);
+-void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops);
++void dsa_cpu_port_ethtool_init(ethtool_ops_no_const *ops);
+ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
+                    int port, const char *name);
+ void dsa_slave_destroy(struct net_device *slave_dev);
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index fc91967..b11a825 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -906,7 +906,7 @@ static void dsa_slave_poll_controller(struct net_device *dev)
+ }
+ #endif
+-void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
++void dsa_cpu_port_ethtool_init(ethtool_ops_no_const *ops)
+ {
+       ops->get_sset_count = dsa_cpu_port_get_sset_count;
+       ops->get_ethtool_stats = dsa_cpu_port_get_ethtool_stats;
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 16737cd..81
\ No newline at end of file
diff --git a/test/grsecurity-3.1-4.8.6-201611091800.patch.sig b/test/grsecurity-3.1-4.8.6-201611091800.patch.sig
new file mode 100644 (file)
index 0000000..c88394a
Binary files /dev/null and b/test/grsecurity-3.1-4.8.6-201611091800.patch.sig differ